How to use the d_h2 method in fMBT

Best Python code snippet using fMBT_python

GAR.py

Source: GAR.py (GitHub)


import numpy as np
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from .fusion.fusionPlugin import fusionPlugin


class GRUencoder(nn.Module):
    def __init__(self, d_emb, d_out, num_layers):
        super(GRUencoder, self).__init__()
        # default encoder 2 layers
        self.gru = nn.GRU(input_size=d_emb, hidden_size=d_out,
                          bidirectional=True, num_layers=num_layers)

    def forward(self, sent, sent_lens):
        """
        :param sent: torch tensor, batch_size x seq_len x d_rnn_in
        :param sent_lens: numpy tensor, batch_size x 1
        :return:
        """
        device = sent.device
        # seq_len x batch_size x d_rnn_in
        sent_embs = sent.transpose(0, 1)
        # sort by length
        s_lens, idx_sort = np.sort(sent_lens)[::-1], np.argsort(-sent_lens)
        idx_unsort = np.argsort(idx_sort)
        idx_sort = torch.from_numpy(idx_sort).cuda(device)
        # idx_sort = idx_sort.cuda(device)
        s_embs = sent_embs.index_select(1, Variable(idx_sort))
        # padding
        s_lens = s_lens.copy()
        sent_packed = pack_padded_sequence(s_embs, s_lens)
        sent_output = self.gru(sent_packed)[0]
        sent_output = pad_packed_sequence(sent_output, total_length=sent.size(1))[0]
        # unsort by length
        idx_unsort = torch.from_numpy(idx_unsort).cuda(device)
        # idx_unsort = idx_unsort.cuda(device)
        sent_output = sent_output.index_select(1, Variable(idx_unsort))
        # batch x seq_len x 2*d_out
        output = sent_output.transpose(0, 1)
        return output


class DialogEncoder(nn.Module):
    def __init__(self, d_h1, d_h2, layers=2, drop=0.5):
        super(DialogEncoder, self).__init__()
        self.dialog_gru = nn.GRU(d_h1, d_h2, num_layers=1, bidirectional=False)
        d_h2 = d_h1 + d_h2  # + sp_size
        self.fc = nn.Sequential(
            nn.Linear(d_h2, d_h1),
            nn.Tanh()
        )
        self.out_dim = d_h1
        d_h2 = d_h1
        self.dropout_mid = nn.Dropout(drop)
        self.gat = nn.ModuleList([GAT(d_h2) for _ in range(layers)])
        self.output2 = nn.Sequential(
            nn.Linear(d_h2, d_h2),
            nn.Tanh(),
        )

    def forward(self, x, sp=None):
        # self.dialog_gru
        s_context = self.dialog_gru(x.unsqueeze(1))[0]
        s_context = s_context.transpose(0, 1).contiguous()
        out = torch.cat([s_context, x.unsqueeze(0)], dim=-1)
        out = self.fc(out)
        out = self.dropout_mid(out)
        for m in self.gat:
            out = m(out)
        return self.output2(out.squeeze(0))


class SentenceEncoder(nn.Module):
    def __init__(self, d_emb, d_out, num_layers, drop=0.5):
        super(SentenceEncoder, self).__init__()
        self.gru = GRUencoder(d_emb, d_out, num_layers)
        self.cnn = nn.Conv1d(d_out * 2, 1, kernel_size=3, stride=1, padding=1)
        fc_size = d_out * 2 + d_emb
        self.dropout_in = nn.Dropout(drop)
        self.fc = nn.Linear(fc_size, d_out)

    def forward(self, x, x_lens):
        gru_x = self.gru(x, x_lens)
        # gru_x = torch.tanh(gru_x)
        g = self.cnn(gru_x.transpose(1, 2)).transpose(1, 2)
        gate_x = torch.tanh(gru_x) * F.sigmoid(g)
        combined = [x, gate_x]
        combined = torch.cat(combined, dim=-1)
        s_embed = self.fc(combined)
        s_embed = torch.tanh(s_embed)
        s_embed = torch.max(s_embed, dim=1)[0]
        s_embed = self.dropout_in(s_embed)
        return s_embed


class GAR(nn.Module):
    def __init__(self, args, embedding):
        super(GAR, self).__init__()
        self.d_h2 = args.d_h2
        # load word2vec
        self.embeddings = embedding
        self.roberta = args.roberta
        self.multi = args.multi
        if args.multi:
            self.fusion = fusionPlugin(args)
        self.sent_encoder = SentenceEncoder(args.d_word_vec, args.d_h1, num_layers=1, drop=args.drop)
        self.dialog_encoder = DialogEncoder(args.d_h1, args.d_h2, args.ll, drop=args.drop)
        self.num_classes = args.num_classes
        d_h2 = self.dialog_encoder.out_dim  # - sp_size
        self.classifier = nn.Linear(d_h2, self.num_classes)

    def forward(self, sents, lens, video=None, audio=None):
        if len(sents.size()) < 2:
            sents = sents.unsqueeze(0)
        if self.roberta:
            w_embed = self.embeddings(sents, lens)
        else:
            w_embed = self.embeddings(sents)
        # sentence
        s_embed = self.sent_encoder(w_embed, lens)
        # dialogs
        if self.multi:
            s_embed = self.fusion(s_embed, video, audio)
        out = self.dialog_encoder(s_embed)
        # classifier
        out = self.classifier(out)
        pred_scores = F.log_softmax(out, dim=1)
        return pred_scores


class GAT(nn.Module):
    def __init__(self, hidden_size):
        super().__init__()
        self.wa = nn.Linear(hidden_size, hidden_size)
        self.wb = nn.Linear(hidden_size, hidden_size)
        self.wv = nn.Linear(hidden_size, hidden_size)
        self.wc = nn.Linear(2 * hidden_size, 1)
        self.mlp = nn.Linear(hidden_size, hidden_size)

    def forward(self, x):
        # x -> b, c, d
        # x = x.unsqueeze(0)
        a = self.wa(x).unsqueeze(2).expand(-1, -1, x.size(1), -1)
        b = self.wb(x).unsqueeze(1).expand(-1, x.size(1), -1, -1)
        c = torch.cat([a, b], dim=3)
        c = self.wc(c).squeeze(3)
        c = F.leaky_relu(c)
        c = F.softmax(c, dim=2)
        out = torch.einsum('blc, bcd->bld', c, self.wv(x))
        out = torch.tanh(self.mlp(out)) + x
        ...
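In this snippet, d_h2 is the hidden size of the dialogue-level GRU: GAR reads args.d_h2 and passes it to DialogEncoder, whose out_dim nevertheless remains d_h1, because the GRU context is concatenated with the input and projected back down by DialogEncoder.fc. Below is a minimal, self-contained sketch of just that step, using plain PyTorch with hypothetical sizes (d_h1 = 300, d_h2 = 128) rather than the full GAR model, which also needs the repository's fusionPlugin:

import torch
import torch.nn as nn

d_h1, d_h2 = 300, 128                       # hypothetical sizes; d_h2 is the dialogue-GRU hidden size
dialog_gru = nn.GRU(d_h1, d_h2, num_layers=1, bidirectional=False)

x = torch.randn(7, d_h1)                    # 7 utterance embeddings for one dialogue
s_context, _ = dialog_gru(x.unsqueeze(1))   # seq_len x batch(=1) x d_h2
out = torch.cat([s_context.transpose(0, 1), x.unsqueeze(0)], dim=-1)  # 1 x 7 x (d_h1 + d_h2)
out = nn.Sequential(nn.Linear(d_h1 + d_h2, d_h1), nn.Tanh())(out)     # back to d_h1, as in DialogEncoder.fc
print(s_context.shape, out.shape)           # torch.Size([7, 1, 128]) torch.Size([1, 7, 300])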


my_net.py

Source: my_net.py (GitHub)


import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
from time import clock
###############################################################
#
# Important notes:
# - Do not change any of the existing functions or parameter names,
#   except in __init__, you may add/change parameter names/defaults values.
# - In __init__ set default values to the best ones, e.g., learning_rate=0.1
# - Training epochs/iterations should not be a parameter to __init__,
#   To train/test your network, we will call fit(...) until time (2 mins) runs out.
#
###############################################################


class Network():
    def __init__(self, learning_rate=0.005):
        ''' initialize the classifier with default (best) parameters '''
        self.alpha = learning_rate
        self.batch_size = 100
        self.display_step = 10

        self.d_X = 294
        self.d_Y = 6
        self.x = tf.placeholder(tf.float32, [None, self.d_X], name='x')
        self.y = tf.placeholder(tf.float32, [None, self.d_Y], name='y')

        self.d_h1 = 64
        self.d_h2 = 32
        self.d_h3 = 16
        W = {"h1": tf.Variable(tf.random_normal([self.d_X, self.d_h1])),
             "h2": tf.Variable(tf.random_normal([self.d_h1, self.d_h2])),
             "h3": tf.Variable(tf.random_normal([self.d_h2, self.d_h3])),
             "out": tf.Variable(tf.random_normal([self.d_h3, self.d_Y]))}

        b = {"h1": tf.Variable(tf.random_normal([self.d_h1])),
             "h2": tf.Variable(tf.random_normal([self.d_h2])),
             "h3": tf.Variable(tf.random_normal([self.d_h3])),
             "out": tf.Variable(tf.random_normal([self.d_Y]))}

        self.layer1 = tf.nn.sigmoid(tf.matmul(self.x, W["h1"]) + b["h1"])
        self.layer2 = tf.nn.sigmoid(tf.matmul(self.layer1, W["h2"]) + b["h2"])
        self.layer3 = tf.nn.sigmoid(tf.matmul(self.layer2, W["h3"]) + b["h3"])
        self.pred_logits = tf.matmul(self.layer3, W["out"]) + b["out"]
        self.pred = tf.nn.sigmoid(self.pred_logits)

        """
        self.d_h1 = 128
        self.d_h2 = 64
        self.d_h3 = 32
        self.d_h4 = 16
        W = {"h1": tf.Variable(tf.random_normal([self.d_X, self.d_h1])),
             "h2": tf.Variable(tf.random_normal([self.d_h1, self.d_h2])),
             "h3": tf.Variable(tf.random_normal([self.d_h2, self.d_h3])),
             "h4": tf.Variable(tf.random_normal([self.d_h3, self.d_h4])),
             "out": tf.Variable(tf.random_normal([self.d_h4, self.d_Y]))}

        b = {"h1": tf.Variable(tf.random_normal([self.d_h1])),
             "h2": tf.Variable(tf.random_normal([self.d_h2])),
             "h3": tf.Variable(tf.random_normal([self.d_h3])),
             "h4": tf.Variable(tf.random_normal([self.d_h4])),
             "out": tf.Variable(tf.random_normal([self.d_Y]))}

        self.layer1 = tf.nn.sigmoid(tf.matmul(self.x, W["h1"]) + b["h1"])
        self.layer2 = tf.nn.sigmoid(tf.matmul(self.layer1, W["h2"]) + b["h2"])
        self.layer3 = tf.nn.sigmoid(tf.matmul(self.layer2, W["h3"]) + b["h3"])
        self.layer4 = tf.nn.sigmoid(tf.matmul(self.layer3, W["h4"]) + b["h4"])
        self.pred_logits = tf.matmul(self.layer4, W["out"]) + b["out"]
        self.pred = tf.nn.sigmoid(self.pred_logits)
        """

        # self.cost = -tf.reduce_sum(self.y*tf.log(self.pred)+(1-self.y)*tf.log(1-self.pred))
        self.cost = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.y, logits=self.pred_logits))
        self.optimizer = tf.train.AdamOptimizer(self.alpha).minimize(self.cost)

        self.init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(self.init)

    def fit(self, X, Y, warm_start=True, n_epochs=10):
        ''' train the network, and if warm_start, then do not reinit. the network
            (if it has already been initialized)
        '''
        X = np.float32(X)
        Y = np.float32(Y)
        self.n_labels = Y.shape[1]
        self.n = Y.shape[0]
        total_batch = self.n // self.batch_size
        if not warm_start:
            self.sess.run(self.init)

        for epoch in range(n_epochs):
            X, Y = shuffle(X, Y)
            avg_cost = 0
            for i in range(total_batch - 1):
                x_batch, y_batch = X[i*self.batch_size:(i+1)*self.batch_size, :], Y[i*self.batch_size:(i+1)*self.batch_size, :]
                _, c = self.sess.run([self.optimizer, self.cost],
                                     feed_dict={'x:0': x_batch, 'y:0': y_batch})
                avg_cost += c / total_batch

            _, c = self.sess.run([self.optimizer, self.cost],
                                 feed_dict={'x:0': X[(total_batch-1)*self.batch_size:],
                                            'y:0': Y[(total_batch-1)*self.batch_size:]})
            avg_cost += c / total_batch
            if (epoch + 1) % self.display_step == 0:
                print("Epoch: " + str(epoch + 1) + ", cost={}".format(avg_cost))
        # print("Optimization Finished!")

        return self

    def predict_proba(self, X):
        ''' return a matrix P where P[i,j] = P(Y[i,j]=1),
            for all instances i, and labels j. '''
        X = np.float32(X)
        return self.pred.eval(session=self.sess, feed_dict={'x:0': X})

    def predict(self, X):
        ''' return a matrix of predictions for X '''
        ...
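Here d_h2 = 32 is simply the width of the second hidden layer of a 294-64-32-16-6 multi-label classifier (the commented-out block shows an alternative 128-64-32-16 configuration). A minimal usage sketch, assuming TensorFlow 1.x (the class relies on tf.placeholder, tf.Session, and tf.train.AdamOptimizer, which were removed in TensorFlow 2.x) and randomly generated dummy data in place of a real dataset:

import numpy as np

# assumes the Network class above has already been defined or imported
net = Network(learning_rate=0.005)                       # builds the graph; self.d_h2 = 32 sizes layer 2
X = np.random.rand(500, 294).astype(np.float32)          # dummy features, d_X = 294
Y = (np.random.rand(500, 6) > 0.5).astype(np.float32)    # dummy multi-label targets, d_Y = 6

net.fit(X, Y, warm_start=True, n_epochs=10)              # mini-batch Adam on the sigmoid cross-entropy
P = net.predict_proba(X)                                 # P[i, j] = P(Y[i, j] = 1)
print(P.shape)                                           # (500, 6)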


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run fMBT automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

