How to use the evaluate_all method in Playwright Python

Best Python code snippets using playwright-python
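The snippets on this page come from assorted open-source GitHub projects in which evaluate_all is a user-defined helper (a method, function, or accumulator variable), not Playwright's own API. For Playwright Python's evaluate_all itself, here is a minimal, self-contained sketch: Locator.evaluate_all() runs a JavaScript function once over the whole array of matched elements, and page.eval_on_selector_all() is the selector-based equivalent. The URL and selectors are placeholders.

from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    browser = p.chromium.launch()
    page = browser.new_page()
    page.goto("https://example.com")  # placeholder URL

    # evaluate_all passes ALL matching elements to the JS callback in one call
    texts = page.locator("a").evaluate_all(
        "elements => elements.map(el => el.textContent.trim())"
    )
    print(texts)

    # eval_on_selector_all is the selector-based equivalent
    link_count = page.eval_on_selector_all("a", "els => els.length")
    print(link_count)

    browser.close()

Because the callback receives every match in a single call, evaluate_all suits aggregate checks (counts, collected texts) better than the per-element evaluate().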

hole.py

Source: hole.py (GitHub)


from __future__ import print_function
from collections import OrderedDict
import keras
import theano as th
import theano.tensor as T
from keras import backend as K
from keras.optimizers import Adagrad, SGD
import keras
from keras.engine.topology import Layer
from keras.models import Sequential, Model
from keras.layers import merge, Input, Embedding, Dropout, Convolution1D, Lambda, Activation, LSTM, Dense, TimeDistributed, \
    ActivityRegularization, Reshape, Flatten
from keras.constraints import unitnorm
import os
import sys
import random
import numpy as np
from time import strftime, gmtime
import six.moves.cPickle as pickle
from keras.optimizers import RMSprop, Adam, SGD, Adadelta, Adagrad
from scipy.stats import rankdata
__author__ = 'nilesh'
class KgeModel:
    def __init__(self, config):
        self.subject = Input(shape=(config['subject_len'],), dtype='int32', name='subject_base')
        self.subject_bad = Input(shape=(config['subject_len'],), dtype='int32', name='subject_bad_base')
        self.relation = Input(shape=(config['relation_len'],), dtype='int32', name='relation_base')
        self.object_good = Input(shape=(config['object_len'],), dtype='int32', name='object_good_base')
        self.object_bad = Input(shape=(config['object_len'],), dtype='int32', name='object_bad_base')
        self.config = config
        self.model_params = config.get('model_params', dict())
        self.similarity_params = config.get('similarity_params', dict())
        # initialize a bunch of variables that will be set later
        self._models = None
        self._similarities = None
        self._object = None
        self._subject = None
        self._kge_model = None
        self.training_model = None
        self.prediction_model = None
    def get_object(self):
        if self._object is None:
            self._object = Input(shape=(self.config['object_len'],), dtype='int32', name='object')
        return self._object
    def get_subject(self):
        if self._subject is None:
            self._subject = Input(shape=(self.config['subject_len'],), dtype='int32', name='subject')
        return self._subject
    # @abstractmethod
    def build(self):
        return
    def get_similarity(self):
        ''' Specify similarity in configuration under 'similarity_params' -> 'mode'
        If a parameter is needed for the model, specify it in 'similarity_params'
        Example configuration:
        config = {
            ... other parameters ...
            'similarity_params': {
                'mode': 'gesd',
                'gamma': 1,
                'c': 1,
            }
        }
        cosine: dot(a, b) / sqrt(dot(a, a) * dot(b, b))
        polynomial: (gamma * dot(a, b) + c) ^ d
        sigmoid: tanh(gamma * dot(a, b) + c)
        rbf: exp(-gamma * l2_norm(a-b) ^ 2)
        euclidean: 1 / (1 + l2_norm(a - b))
        exponential: exp(-gamma * l2_norm(a - b))
        gesd: euclidean * sigmoid
        aesd: (euclidean + sigmoid) / 2
        '''
        params = self.similarity_params
        similarity = params['mode']
        axis = lambda a: len(a._keras_shape) - 1
        dot = lambda a, b: K.batch_dot(a, b, axes=axis(a))
        l2_norm = lambda a, b: K.sqrt(K.sum((a - b) ** 2, axis=axis(a), keepdims=True))
        l1_norm = lambda a, b: K.sum(K.abs(a - b), axis=axis(a), keepdims=True)
        if similarity == 'cosine':
            return lambda x: dot(x[0], x[1]) / K.sqrt(dot(x[0], x[0]) * dot(x[1], x[1]))
        elif similarity == 'polynomial':
            return lambda x: (params['gamma'] * dot(x[0], x[1]) + params['c']) ** params['d']
        elif similarity == 'sigmoid':
            return lambda x: K.tanh(params['gamma'] * dot(x[0], x[1]) + params['c'])
        elif similarity == 'rbf':
            return lambda x: K.exp(-1 * params['gamma'] * l2_norm(x[0], x[1]) ** 2)
        elif similarity == 'euclidean':
            return lambda x: 1 / (1 + l2_norm(x[0], x[1]))
        elif similarity == 'l1':
            return lambda x: -l1_norm(x[0], x[1])
        elif similarity == 'exponential':
            return lambda x: K.exp(-1 * params['gamma'] * l2_norm(x[0], x[1]))
        elif similarity == 'gesd':
            euclidean = lambda x: 1 / (1 + l2_norm(x[0], x[1]))
            sigmoid = lambda x: 1 / (1 + K.exp(-1 * params['gamma'] * (dot(x[0], x[1]) + params['c'])))
            return lambda x: euclidean(x) * sigmoid(x)
        elif similarity == 'aesd':
            euclidean = lambda x: 0.5 / (1 + l2_norm(x[0], x[1]))
            sigmoid = lambda x: 0.5 / (1 + K.exp(-1 * params['gamma'] * (dot(x[0], x[1]) + params['c'])))
            return lambda x: euclidean(x) + sigmoid(x)
        else:
            raise Exception('Invalid similarity: {}'.format(similarity))
    def get_kge_model(self):
        if self._models is None:
            self._models = self.build()
        if self._kge_model is None:
            subject_output, relation_output, object_output = self._models
            # relation_output2 = Reshape((100,100))(relation_output)
            sp_output = merge([subject_output, relation_output], mode='sum')
            # so_output = merge([subject_output, object_output], mode=lambda x: np.outer(x[0], x[1]).reshape(100000,))
            # spo_output = merge([sp_output, Reshape((0,100))(object_output)], mode=lambda a, b: K.batch_dot(a, b, axes=len(a._keras_shape) - 1),
            #                    output_shape=lambda x: x[0])
            spo_output = merge([sp_output, object_output], mode='sum', output_shape=lambda x: x[:-1])
            self._kge_model = Model(input=[self.subject, self.relation, self.get_object()], output=[spo_output])
        return self._kge_model
    def compile(self, optimizer, **kwargs):
        kge_model = self.get_kge_model()
        good_output = kge_model([self.subject, self.relation, self.object_good])
        bad_output = kge_model([self.subject, self.relation, self.object_bad])
        loss = merge([good_output, bad_output],
                     mode=lambda x: K.maximum(1e-6, self.config['margin'] - x[0] + x[1]),
                     output_shape=lambda x: x[0])
        self.training_model = Model(input=[self.subject, self.relation, self.object_good, self.object_bad], output=loss)
        self.training_model.compile(loss=lambda y_true, y_pred: y_pred + y_true - y_true, optimizer=optimizer, **kwargs)
        self.prediction_model = Model(input=[self.subject, self.relation, self.object_good], output=good_output)
        self.prediction_model.compile(loss='binary_crossentropy', optimizer=optimizer, **kwargs)
        self.training_model.summary()
    def fit(self, x, **kwargs):
        assert self.training_model is not None, 'Must compile the model before fitting data'
        y = np.zeros(shape=x[0].shape[:1])
        return self.training_model.fit(x, y, **kwargs)
    def train_on_batch(self, x, **kwargs):
        assert self.training_model is not None, 'Must compile the model before fitting data'
        y = np.zeros(shape=x[0].shape[:1])
        return self.training_model.train_on_batch(x, y, **kwargs)
    def predict(self, x, **kwargs):
        return self.prediction_model.predict(x, **kwargs)
    def save_weights(self, file_name, **kwargs):
        assert self.prediction_model is not None, 'Must compile the model before saving weights'
        self.prediction_model.save_weights(file_name, **kwargs)
    def load_weights(self, file_name, **kwargs):
        assert self.prediction_model is not None, 'Must compile the model loading weights'
        self.prediction_model.load_weights(file_name, **kwargs)
class RescalModel(KgeModel):
    def build(self):
        subject = self.subject
        relation = self.relation
        object_ = self.get_object()
        embedding_size = self.model_params.get('n_embed_dims', 100)
        # add embedding layers
        embedding_rel = Embedding(input_dim=self.config['n_rel'],
                                  output_dim=self.model_params.get('n_embed_dims', 100),
                                  init='he_uniform',
                                  mask_zero=False)
        embedding_ent = Embedding(input_dim=self.config['n_ent'],
                                  output_dim=self.model_params.get('n_embed_dims', 100),
                                  init='he_uniform',
                                  W_constraint=unitnorm(axis=1),
                                  mask_zero=False)
        subject_embedding = embedding_ent(subject)
        relation_embedding = embedding_rel(relation)
        object_embedding = embedding_ent(object_)
        subject_output = Reshape((embedding_size,))(subject_embedding)
        relation_output = Reshape((embedding_size,))(relation_embedding)
        object_output = Reshape((embedding_size,))(object_embedding)
        return subject_output, relation_output, object_output
random.seed(42)
os.environ['FREEBASE_15K'] = 'data/freebase15k'
class Evaluator:
    def __init__(self, conf=None):
        try:
            data_path = os.environ['FREEBASE_15K']
        except KeyError:
            print("FREEBASE_15K is not set.")
            sys.exit(1)
        self.path = data_path
        self.conf = dict() if conf is None else conf
        self.params = conf.get('training_params', dict())
        self.entity = self.load('freebase_15k-id2entity.pkl')
        self._vocab = None
        self._reverse_vocab = None
        self._eval_sets = None
    ##### Resources #####
    def load(self, name):
        return pickle.load(open(os.path.join(self.path, name), 'rb'))
    def vocab(self):
        if self._vocab is None:
            self._vocab = self.load('vocabulary')
        return self._vocab
    def reverse_vocab(self):
        if self._reverse_vocab is None:
            vocab = self.vocab()
            self._reverse_vocab = dict((v.lower(), k) for k, v in vocab.items())
        return self._reverse_vocab
    ##### Loading / saving #####
    def save_epoch(self, model, epoch):
        if not os.path.exists('models/freebase_models/embedding/'):
            os.makedirs('models/freebase_models/embedding/')
        model.save_weights('models/freebase_models/embedding/weights_epoch_%d.h5' % epoch, overwrite=True)
    def load_epoch(self, model, epoch):
        assert os.path.exists('models/freebase_models/embedding/weights_epoch_%d.h5' % epoch),\
            'Weights at epoch %d not found' % epoch
        model.load_weights('models/freebase_models/embedding/weights_epoch_%d.h5' % epoch)
    ##### Converting / reverting #####
    def convert(self, words):
        rvocab = self.reverse_vocab()
        if type(words) == str:
            words = words.strip().lower().split(' ')
        return [rvocab.get(w, 0) for w in words]
    def revert(self, indices):
        vocab = self.vocab()
        return [vocab.get(i, 'X') for i in indices]
    ##### Padding #####
    def padq(self, data):
        return self.pad(data, self.conf.get('question_len', None))
    def pada(self, data):
        return self.pad(data, self.conf.get('answer_len', None))
    def pad(self, data, len=None):
        from keras.preprocessing.sequence import pad_sequences
        return pad_sequences(data, maxlen=len, padding='post', truncating='post', value=0)
    ##### Training #####
    def print_time(self):
        print(strftime('%Y-%m-%d %H:%M:%S :: ', gmtime()), end='')
    def train(self, model):
        eval_every = self.params.get('eval_every', None)
        save_every = self.params.get('save_every', None)
        batch_size = self.params.get('batch_size', 128)
        nb_epoch = self.params.get('nb_epoch', 10)
        split = self.params.get('validation_split', 0)
        training_set = self.load('freebase_15k-train.pkl')
        valid_set = self.load('freebase_15k-valid.pkl')
        subjects = list()
        relations = list()
        good_objects = list()
        for line in training_set:
            triplet = line.split('\t')
            subjects += [[int(triplet[0])]]
            relations += [[int(triplet[1])]]
            good_objects += [[int(triplet[2])]]
        subjects = np.asarray(subjects)
        relations = np.asarray(relations)
        good_objects = np.asarray(good_objects)
        # subjects_valid = list()
        # relations_valid = list()
        # good_objects_valid = list()
        #
        # for line in valid_set:
        #     triplet = line.split('\t')
        #     subjects_valid += [[int(triplet[0])]]
        #     relations_valid += [[int(triplet[1])]]
        #     good_objects_valid += [[int(triplet[2])]]
        # subjects_valid = np.asarray(subjects_valid)
        # relations_valid = np.asarray(relations_valid)
        # good_objects_valid = np.asarray(good_objects_valid)
        val_loss = {'loss': 1., 'epoch': 0}
        for i in range(1, nb_epoch+1):
            # bad_answers = np.roll(good_answers, random.randint(10, len(questions) - 10))
            # bad_answers = good_answers.copy()
            # random.shuffle(bad_answers)
            bad_objects = np.asarray([[int(random.choice(list(self.entity.keys())))] for _ in range(len(good_objects))])
            # shuffle questionsj
            # zipped = zip(questions, good_answers)
            # random.shuffle(zipped)
            # questions[:], good_answers[:] = zip(*zipped)
            print('Epoch %d :: ' % i, end='')
            self.print_time()
            model.fit([subjects, relations, good_objects, bad_objects], nb_epoch=1, batch_size=batch_size)
            # if hist.history['val_loss'][0] < val_loss['loss']:
            #     val_loss = {'loss': hist.history['val_loss'][0], 'epoch': i}
            # print('Best: Loss = {}, Epoch = {}'.format(val_loss['loss'], val_loss['epoch']))
            if eval_every is not None and i % eval_every == 0:
                self.get_mrr(model)
            if save_every is not None and i % save_every == 0:
                self.save_epoch(model, i)
    ##### Evaluation #####
    def prog_bar(self, so_far, total, n_bars=20):
        n_complete = int(so_far * n_bars / total)
        if n_complete >= n_bars - 1:
            print('\r[' + '=' * n_bars + ']', end='')
        else:
            s = '\r[' + '=' * (n_complete - 1) + '>' + '.' * (n_bars - n_complete) + ']'
            print(s, end='')
    def eval_sets(self):
        if self._eval_sets is None:
            self._eval_sets = dict([(s, self.load(s)) for s in ['freebase_15k-test.pkl']])
        return self._eval_sets
    def get_mrr(self, model, evaluate_all=False):
        top1s = list()
        mrrs = list()
        for name, data in self.eval_sets().items():
            if evaluate_all:
                self.print_time()
                print('----- %s -----' % name)
            random.shuffle(data)
            if not evaluate_all and 'n_eval' in self.params:
                data = data[:self.params['n_eval']]
            # c_1 for hit@1, c_3 for hit@3, c_10 for hit@10
            c_1, c_3, c_10 = 0, 0, 0
            mean_ranks = list()
            for i, d in enumerate(data):
                triplet = d.split('\t')
                if evaluate_all:
                    self.prog_bar(i, len(data))
                candidate_objects = self.entity.keys()
                candidate_objects.remove(int(triplet[2]))
                subject = np.asarray([[int(triplet[0])]] * (len(candidate_objects)+1))
                relation = np.asarray([[int(triplet[1])]] * (len(candidate_objects)+1))
                objects = np.asarray([[int(triplet[2])]] + [[entity_id] for entity_id in candidate_objects])
                sims = model.predict([subject, relation, objects], batch_size=len(self.entity)).flatten()
                r = rankdata(sims, method='max')
                target_rank = r[0]
                num_candidate = len(sims)
                real_rank = num_candidate - target_rank + 1
                # print(' '.join(self.revert(d['question'])))
                # print(' '.join(self.revert(self.answers[indices[max_r]])))
                # print(' '.join(self.revert(self.answers[indices[max_n]])))
                c_1 += 1 if target_rank == num_candidate else 0
                c_3 += 1 if target_rank + 3 > num_candidate else 0
                c_10 += 1 if target_rank + 10 > num_candidate else 0
                mean_ranks.append(real_rank)
                # c_2 += 1 / float(r[max_r] - r[max_n] + 1)
            hit_at_1 = c_1 / float(len(data))
            hit_at_3 = c_3 / float(len(data))
            hit_at_10 = c_10 / float(len(data))
            avg_rank = np.mean(mean_ranks)
            del data
            if evaluate_all:
                print('Hit@1 Precision: %f' % hit_at_1)
                print('Hit@3 Precision: %f' % hit_at_3)
                print('Hit@10 Precision: %f' % hit_at_10)
                print('Mean Rank: %f' % avg_rank)
            # top1s.append(top1)
            # mrrs.append(mrr)
        # rerun the evaluation if above some threshold
        if not evaluate_all:
            print('Top-1 Precision: {}'.format(top1s))
            print('MRR: {}'.format(mrrs))
            evaluate_all_threshold = self.params.get('evaluate_all_threshold', dict())
            evaluate_mode = evaluate_all_threshold.get('mode', 'all')
            mrr_theshold = evaluate_all_threshold.get('mrr', 1)
            top1_threshold = evaluate_all_threshold.get('top1', 1)
            if evaluate_mode == 'any':
                evaluate_all = evaluate_all or any([x >= top1_threshold for x in top1s])
                evaluate_all = evaluate_all or any([x >= mrr_theshold for x in mrrs])
            else:
                evaluate_all = evaluate_all or all([x >= top1_threshold for x in top1s])
                evaluate_all = evaluate_all or all([x >= mrr_theshold for x in mrrs])
            if evaluate_all:
                return self.get_mrr(model, evaluate_all=True)
if __name__ == '__main__':
    conf = {
        'subject_len': 1,
        'relation_len': 1,
        'object_len': 1,
        'n_rel': 1345,  # len(vocabulary)
        'n_ent': 14951,
        'margin': 0.2,
        'training_params': {
            'save_every': 100,
            'eval_every': 1,
            'batch_size': 128,
            'nb_epoch': 1000,
            'validation_split': 0,
            'optimizer': Adam(),
            # 'optimizer': Adam(clip_norm=0.1),
            # 'n_eval': 100,
            'evaluate_all_threshold': {
                'mode': 'all',
                'top1': 0.4,
            },
        },
        'model_params': {
            'n_embed_dims': 100,
            'n_hidden': 200,
            # convolution
            'nb_filters': 1000,  # * 4
            'conv_activation': 'relu',
            # recurrent
            'n_lstm_dims': 141,  # * 2
            # 'initial_embed_weights': np.load('models/wordnet_word2vec_1000_dim.h5'),
        },
        'similarity_params': {
            'mode': 'cosine',
            'gamma': 1,
            'c': 1,
            'd': 2,
        }
    }
    evaluator = Evaluator(conf)
    ##### Embedding model ######
    model = RescalModel(conf)
    optimizer = conf.get('training_params', dict()).get('optimizer', 'adam')
    # TransE model
    # model = TranEModel(conf)
    # optimizer = conf.get('training_params', dict()).get('optimizer', 'adam')
    model.compile(optimizer=optimizer)
    # save embedding layer
    # evaluator.load_epoch(model, 33)
    # embedding_layer = model.prediction_model.layers[2].layers[2]
    # evaluator.load_epoch(model, 100)
    # evaluator.train(model)
    # weights = embedding_layer.get_weights()[0]
    # np.save(open('models/embedding_1000_dim.h5', 'wb'), weights)
    # train the model
    # evaluator.load_epoch(model, 54)
    evaluator.train(model)
    # embedding_matrix = model.prediction_model.layers[3].layers[3].get_weights()[0]
    # print(np.linalg.norm(embedding_matrix[1, :]))
    # print(np.linalg.norm(embedding_matrix[:, 1]))
    # evaluate mrr for a particular epoch
    # evaluator.load_epoch(model, 5)
    # evaluator.get_mrr(model, evaluate_all=True)
# class HolE(Layer):
#     def __init__(self, ndim=50, marge=1., lremb=0.1, lrparam=1., **kwargs):
#         super().__init__(**kwargs)
#         self.ndim = ndim
#         self.marge = marge
#         self.lremb = lremb
#         self.lrparam = lrparam
# import itertools
# import logging
# import numpy as np
# import os
# import time
# import theano as th
# import theano.tensor as T
# from .gradient_descent import gd
# from ..data_structures import triple_tensor as tt
# from ..experiments.metrics import auprc
# from .optimization import sgd_on_triples
# from ..experiments.helper import tolist
# _log = logging.getLogger(__name__)
# DTYPE = th.config.floatX  # @UndefinedVariable
# def init_uniform(rng, n, d, dtype=np.float32):
#     wbound = np.sqrt(6. / d)
#     W_values = rng.uniform(low=-wbound, high=wbound, size=(d, n))
#     W_values = W_values / np.sqrt(np.sum(W_values ** 2, axis=0))
#     W_values = np.asarray(W_values, dtype=dtype)
#     return W_values.T
# class TranslationalEmbeddingsModel(object):
#     """Translational Embeddings Model.
#     Implementation of TransE:
#     Antoine Bordes, Nicolas Usunier, Alberto Garcia-Duran, Jason Weston, Oksana
#     Yakhnenko. Translating Embeddings for Modeling Multi-relational Data.
#     NIPS 2013
#     Parameters
#     ----------
#     consider_tc : bool
#         Whether or not to consider information about type constraints in the
#         data.
#         Defaults to True.
#     simfn : string.
#         'L1' or 'L2' similarity function.
#         Defaults to 'L1'.
#     ndim : int
#         Dimension of the latent embeddings (rank).
#         Defaults to 50.
#     marge : float
#         Margin in the margin based ranking function (gamma in the paper).
#         Defaults to 1.
#     lremb : float
#         Learning rate for latent embeddings.
#         Defaults to 0.1.
#     lrparam : float
#         Learning rate for other parameters.
#         Defaults to 1.0.
#     mbatchsize : int
#         Size of the minibatch.
#         Defaults to 128.
#     totepoches : int
#         Maximum epoches (how often the model is trained on the complete
#         dataset).
#         Defaults to 500.
#     neval : int
#         Validate performance every nth minibatch.
#         Defaults to 1.
#     lcwa : bool
#         If true and consider_tc is True, approximate the type constraints from
#         the data with the local closed-world assumption.
#         Defaults to `False`.
#     seed : int
#         Seed used for random number generation.
#         Defaults to 123.
#     savepath : string
#         Location where to save the best model parameters.
#         Defaults to ./transE.
#     """
#     def __init__(self, consider_tc=True, simfn='L1', ndim=50, marge=1.,
#                  lremb=0.1, lrparam=1., mbatchsize=128, maxepoch=500,
#                  neval=100, lcwa=False, seed=123, conv=1e-4,
#                  savepath='./transE', dtype=DTYPE,
#                  mid=np.random.randint(1000000)):
#         model_id = (time.strftime('%d_%m_%y___%H_%M_%S') +
#                     '%d-%d_' % (mid, np.random.randint(100000)))
#         self.simfn = simfn
#         self.ndim = ndim
#         self.marge = marge
#         self.lremb = lremb
#         self.lrparam = lrparam
#         self.mbatchsize = mbatchsize
#         self.maxepoch = maxepoch
#         self.neval = neval
#         self.seed = seed
#         self.corrupted = 1
#         self.corrupted_axes = [0, 1]
#         self.rng = np.random.RandomState(seed)
#         self.dtype = dtype
#         self.consider_tc = consider_tc
#         self.lcwa = lcwa
#         self.conv = conv
#         self.params = [ndim, marge, lremb, lrparam, simfn, seed, consider_tc,
#                        lcwa]
#         self.parallization_precautions = False
#         self.savefile = os.path.join(savepath,
#                                      model_id+type(self).__name__+".pkl")
#         # create path where the model is saved
#         if not os.path.isdir(savepath):
#             os.mkdir(savepath)
#     def __graph_pred(self, X):
#         # Translational Embeddings Function d(h+l,t)
#         e = self.E[X[:, :2].T.reshape((-1,))]
#         h = e[:e.shape[0]//2]
#         l = self.R[X[:, 2]]
#         t = e[e.shape[0]//2:]
#         return (-T.sum(T.abs_((h+l)-t), axis=1)
#                 if self.simfn == 'L1'
#                 else - T.sqrt(T.sum(T.sqr((h+l)-t), axis=1)))
#     def __graph_train(self, X, Xc):
#         # Translational Embeddings max-margin loss function
#         E = self.E[T.concatenate([X[:, :2], Xc[:, :2]],
#                                  axis=1).T.reshape((-1,))]
#         R = self.R[T.concatenate([X[:, 2], Xc[:, 2]])]
#         e = E[:E.shape[0]//2]
#         h = e[:e.shape[0]//2]
#         l = R[:R.shape[0]//2]
#         t = e[e.shape[0]//2:]
#         outputX = (-T.sum(T.abs_((h+l)-t), axis=1)
#                    if self.simfn == 'L1'
#                    else - T.sqrt(T.sum(T.sqr((h+l)-t), axis=1)))
#         ec = E[E.shape[0]//2:]
#         hc = ec[:ec.shape[0]//2]
#         lc = R[R.shape[0]//2:]
#         tc = ec[ec.shape[0]//2:]
#         outputXc = (-T.sum(T.abs_((hc+lc)-tc), axis=1)
#                     if self.simfn == 'L1'
#                     else - T.sqrt(T.sum(T.sqr((hc+lc)-tc), axis=1)))
#         loss = outputXc - outputX + self.marge
#         return T.sum(loss * (loss > 0))
#     def loss_func(self, indices, Y):
#         # Metric used for early stopping
#         return 1-auprc(Y, self.func(indices))
#     def fit(self, tensor):
#         if not self.consider_tc:
#             # remove type-constraint information
#             tensor.type_constraints = [[None, None]
#                                        for i in xrange(tensor.shape[2])]
#         elif self.lcwa:
#             tensor.approximate_type_constraints()
#         self.type_constraints = tensor.type_constraints
#         self.Nent = tensor.shape[0]
#         self.Nrel = tensor.shape[2]
#         self.samplefunc = tt.compute_corrupted_bordes
#         X = T.imatrix("X")  # matrices with triple indices
#         Xc = T.imatrix("Xc")  # corrupted entities
#         self.E = th.shared(
#             value=init_uniform(self.rng, tensor.shape[0], self.ndim,
#                                dtype=self.dtype), name="Ents_emb")
#         self.R = th.shared(
#             value=init_uniform(self.rng, tensor.shape[0], self.ndim,
#                                dtype=self.dtype), name="Rels_emb")
#         self.parameters = [self.E, self.R]
#         # Output function TransE: d(h+l,t)
#         self.func = th.function([X], self.__graph_pred(X))
#         # Define the cost function
#         loss_pos = self.__graph_train(X, Xc)
#         # Normalization function for embeddings of entities:
#         batch_idcs = T.ivector('batch_idcs')
#         update = OrderedDict({self.E: T.set_subtensor(
#             self.E[batch_idcs], self.E[batch_idcs] /
#             T.sqrt(T.sum(self.E[batch_idcs] ** 2, axis=1, keepdims=True)))})
#         self.normalize = th.function([batch_idcs], [], updates=update)
#         # Update function
#         self.update_func = gd([X, Xc], loss_pos, self.parameters,
#                               lr=[self.lremb,
#                                   self.lrparam/float(self.mbatchsize)])
#         # Train the model with stg
#         fitted_parameters, self.used_epochs, self.epoch_times = (
#             sgd_on_triples(self.rng, tensor, self, neval=self.neval,
#                            mbsize=self.mbatchsize, unlabeled=True,
#                            copy_X_train=not self.parallization_precautions))
#         for i, parameter in enumerate(fitted_parameters):
#             self.parameters[i].set_value(parameter.get_value())
#     @property
#     def sparsity(self):
#         raise NotImplementedError
#     def clear(self):
#         """Deletes the memory expensive parameters."""
#         del self.E
#         del self.R
#         del self.parameters
#         os.remove(self.savefile)
#     def predict(self, indices):
#         # This should be just d(h+l,t)
#         return self.func(indices)
#     @staticmethod
#     def model_creator(settings):
#         # For loading multiple model parameters from a configuration file
#         confs = None
#         if settings['try_all_reg_combinations']:
#             confs = list(itertools.product(tolist(settings['rank']),
#                                            tolist(settings['gamma']),
#                                            tolist(settings['lrate_emb']),
#                                            tolist(settings['lrate_par'])))
#         else:
#             confs = [[r, m, lr1, lr2]
#                      for r, m, lr1, lr2 in
#                      zip(tolist(settings['rank']),
#                          tolist(settings['gamma']),
#                          tolist(settings['lrate_emb']),
#                          tolist(settings['lrate_par']))]
#         confs = list(itertools.product(tolist(settings['seed']), confs))
#         models = []
#         for i, conf in enumerate(confs):
#             s, conf = conf
#             r, m, lr1, lr2 = conf
#             models.append(TranslationalEmbeddingsModel(
#                 consider_tc=settings['consider_tc'],
#                 simfn=str.upper(settings['simfn']),
#                 ndim=r,
#                 marge=m,
#                 lremb=lr1,
#                 lrparam=lr2,
#                 mbatchsize=settings['mbatchsize'],
#                 maxepoch=settings['maxepoch'],
#                 neval=settings['neval'],
#                 lcwa=settings['lcwa'],
#                 seed=s,
#                 savepath=settings['savepath'],
#                 mid=i))
...


EnsembleTable.py

Source: EnsembleTable.py (GitHub)


...87 print("Validation Accuracy Table")88 for i in range(len(self.models)):89 criterion = nn.CrossEntropyLoss()90 ensemble_solver = Ensemble([self.models[i]])91 top1_acc, top5_acc, val_loss = ensemble_solver.evaluate_all(criterion, self.dataloaders, self.dataset_sizes)92 fgsm_top1_acc, fgsm_top5_acc, fgsm_val_loss = ensemble_solver.evaluate_all(criterion, self.fgsm_dataloader, self.fgsm_dataset_sizes)93 blurred_top1_acc, blurred_top5_acc, blurred_val_loss = ensemble_solver.evaluate_all(criterion, self.blurred_dataloader, self.blurred_dataset_sizes)94 print("{} = top1_acc: {}, top5_acc:{}, fgsm_top1_acc:{}, blurred_top1_acc:{}".format(model_names[i], top1_acc, top5_acc, fgsm_top1_acc, blurred_top1_acc))95 96 print()97 resnet_model, vgg_model, dense_model, attention_model = self.models98 99 combo = [100 [resnet_model, dense_model, vgg_model, attention_model],101 [resnet_model, dense_model, attention_model],102 [resnet_model, vgg_model, attention_model],103 [resnet_model, dense_model, vgg_model],104 [dense_model, vgg_model, attention_model]105 ]106 combo_names = [107 ["Resnet152, VGG19_bn, DenseNet, ResAttNet"],108 ["Resnet152, DenseNet, ResAttNet"],109 ["Resnet152, VGG19_bn, ResAttNet"],110 ["Resnet152, VGG19_bn, DenseNet"],111 ["DenseNet, VGG19_bn, ResAttNet"]112 ]113 114 print("Ensemble by Averaging logits")115 for i in range(len(combo)):116 criterion = nn.CrossEntropyLoss()117 ensemble_solver = Ensemble(combo[i])118 top1_acc, top5_acc, val_loss = ensemble_solver.evaluate_all(criterion, self.dataloaders, self.dataset_sizes)119 fgsm_top1_acc, fgsm_top5_acc, fgsm_val_loss = ensemble_solver.evaluate_all(criterion, self.fgsm_dataloader, self.fgsm_dataset_sizes)120 blurred_top1_acc, blurred_top5_acc, blurred_val_loss = ensemble_solver.evaluate_all(criterion, self.blurred_dataloader, self.blurred_dataset_sizes)121 print(combo_names[i][0])122 print("Validation top1_acc: {}, top5_acc:{}, fgsm_top1_acc:{}, blurred_top1_acc:{}".format(top1_acc, top5_acc, fgsm_top1_acc, blurred_top1_acc))123 print()124 print("Ensemble by Majority Vote")125 for i in range(len(combo)):126 criterion = nn.CrossEntropyLoss()127 ensemble_solver = Ensemble(combo[i])128 top1_acc, top5_acc, val_loss = ensemble_solver.evaluate_all(criterion, self.dataloaders, self.dataset_sizes, mode="maj vote")129 fgsm_top1_acc, fgsm_top5_acc, fgsm_val_loss = ensemble_solver.evaluate_all(criterion, self.fgsm_dataloader, self.fgsm_dataset_sizes, mode="maj vote")130 blurred_top1_acc, blurred_top5_acc, blurred_val_loss = ensemble_solver.evaluate_all(criterion, self.blurred_dataloader, self.blurred_dataset_sizes, mode="maj vote")131 print(combo_names[i][0])132 print("Validation top1_acc: {}, top5_acc:{}, fgsm_top1_acc:{}, blurred_top1_acc:{}".format(top1_acc, top5_acc, fgsm_top1_acc, blurred_top1_acc))...


main.py

Source: main.py (GitHub)


import nn
import gensim
from gensim.test.utils import datapath
from gensim.models import KeyedVectors
import numpy as np
import torch
from argparse import ArgumentParser
from pathlib import Path
from tqdm import tqdm
p = ArgumentParser()
p.add_argument("embeddings_folder", type = Path)
p.add_argument("output_folder", type = Path)
p.add_argument("vaa_pairs_folder", type = Path)
p.add_argument("avv_pairs_folder", type = Path)
p.add_argument("--dependency", action = "append")
p.add_argument("--language", action = "append")
args = p.parse_args()
embeddings_folder = args.embeddings_folder
output_folder = args.output_folder
vaa_pairs_folder = args.vaa_pairs_folder
avv_pairs_folder = args.avv_pairs_folder
dependency = args.dependency
language = args.language
for lang in language:
    embeddings = KeyedVectors.load_word2vec_format(datapath((embeddings_folder / "embeddings_{}".format(lang))), binary = False)
    file0 = open((embeddings_folder / "embeddings_{}".format(lang)), "rb")
    vocabulary = []
    file0.readline()
    for line in file0:
        line = line.decode('utf-8').split()
        vocabulary.append(line[0])
    file0.close()
    model = nn.DotProductModel(embeddings, vocabulary)
    for dep in dependency:
        file1 = open((vaa_pairs_folder / "{}/v_a1_a2_filtered_pairs_{}_{}".format(lang, dep, lang)), "rb")
        file2 = open((avv_pairs_folder / "{}/a_v1_v2_filtered_pairs_{}_{}".format(lang, dep, lang)), "rb")
        fileout1 = open((output_folder / "ranking_v_a_{}_{}".format(dep, lang)), 'w', encoding = 'utf-8')
        fileout2 = open((output_folder / "disambiguate_v_a_{}_{}".format(dep, lang)), 'w', encoding = 'utf-8')
        evaluate_all = 0
        total_all = 0
        evaluate = 0
        total = 0
        examples = []
        for line in file1:
            line = line.decode("utf-8").split()
            examples.append(line)
        results = model.pseudo_disambiguate(examples)
        print("{}_{}_ranking...".format(lang, dep))
        for i in tqdm(range(len(results))):
            v, a1, a2, rp, rn, d = results[i]
            if (i != 0 and v != results[i-1][0]) or i == len(results) - 1:
                fileout1.write("evaluate:\t" + str(evaluate / total) + "\n")
                evaluate_all += evaluate
                total_all += total
                evaluate = 0
                total = 0
            #for v, a1, a2, rp, rn, d in results:
            fileout1.write(v + "\t" + a1 + "\t" + a2 + "\t" + str(rp) + "\t" + str(rn) + "\t" + str(d) + "\n")
            evaluate += (1-d)
            total += 1
        if total_all == 0:
            continue
        fileout1.write("evaluate_all:\t" + str(evaluate_all / total_all) + "\n")
        evaluate_all = 0
        total_all = 0
        evaluate = 0
        total = 0
        examples = []
        for line in file2:
            line = line.decode("utf-8").split()
            examples.append(line)
        results = model.pseudo_disambiguate(examples)
        print("{}_{}_disambiguating...".format(lang, dep))
        for i in tqdm(range(len(results))):
            v, a1, a2, rp, rn, d = results[i]
            if (i != 0 and v != results[i-1][0]) or i == len(results) - 1:
                fileout2.write("evaluate:\t" + str(evaluate / total) + "\n")
                evaluate_all += evaluate
                total_all += total
                evaluate = 0
                total = 0
            #for v, a1, a2, rp, rn, d in results:
            fileout2.write(v + "\t" + a1 + "\t" + a2 + "\t" + str(rp) + "\t" + str(rn) + "\t" + str(d) + "\n")
            evaluate += (1-d)
            total += 1
        if total_all == 0:
            continue
        fileout2.write("evaluate_all:\t" + str(evaluate_all / total_all) + "\n")
        file1.close()
        file2.close()
        fileout1.close()
        fileout2.close()
"""
    posi_v_a = {}
    posi_a_v = {}
    nega_v_a = {}
    nega_a_v = {}
    for line in file1:
        line = line.decode('utf-8').split()
        if line[0] not in posi_v_a:
            posi_v_a[line[0]] = []
            nega_v_a[line[0]] = []
        posi_v_a[line[0]].append(line[1])
        nega_v_a[line[0]].append(line[2])
    for line in file2:
        line = line.decode('utf-8').split()
        if line[0] not in posi_a_v:
            posi_a_v[line[0]] = []
            nega_a_v[line[0]] = []
        posi_a_v[line[0]].append(line[1])
        nega_a_v[line[0]].append(line[2])
    print("ranking...")
    fileout1.write("v\targ_posi\targ_nega\trank_posi\trank_nega\tdisambiguate\n")
    evaluate_all = 0
    total_all = 0
    for cnt in tqdm(range(len(posi_v_a))):
        i = list(posi_v_a.keys())[cnt]
        examples = []
        for j in posi_v_a[i]:
            examples.append((i, j))
        posi_ranks = model.rank(examples)
        examples = []
        for j in nega_v_a[i]:
            examples.append((i, j))
        nega_ranks = model.rank(examples)
        evaluate = 0
        total = 0
        print(len(posi_ranks))
        print(len(nega_ranks))
        for n in range(len(posi_ranks)):
            disam = 1
            total += 1
            if len(posi_ranks[n]) < 3 or len(nega_ranks[n]) < 3:
                continue
            if posi_ranks[n][2] > nega_ranks[n][2]:
                disam = 0
                evaluate += 1
            fileout1.write(posi_ranks[n][0] + "\t" + posi_ranks[n][1] + "\t" + nega_ranks[n][1] + "\t" + str(posi_ranks[n][2]) + "\t" + str(nega_ranks[n][2]) + "\t" + str(disam) + "\n")
        fileout1.write("evaluate:\t" + str(evaluate / total) + "\n")
        evaluate_all += evaluate
        total_all += total
    fileout1.write("evaluate_all:\t" + str(evaluate_all / total_all) + "\n")
    print("disambiguating...")
    fileout2.write("arg\tv_posi\tv_nega\trank_posi\trank_nega\tdisambiguate\n")
    evaluate_all = 0
    total_all = 0
    for cnt in tqdm(range(len(posi_a_v))):
        i = list(posi_a_v.keys())[cnt]
        examples = []
        for j in posi_a_v[i]:
            examples.append((i, j))
        posi_ranks = model.rank(examples)
        examples = []
        for j in nega_a_v[i]:
            examples.append((i, j))
        nega_ranks = model.rank(examples)
        evaluate = 0
        total = 0
        for n in range(len(posi_ranks)):
            disam = 1
            total += 1
            if len(posi_ranks[n]) < 3 or len(nega_ranks[n]) < 3:
                continue
            if posi_ranks[n][2] > nega_ranks[n][2]:
                disam = 0
                evaluate += 1
            fileout2.write(posi_ranks[n][0] + "\t" + posi_ranks[n][1] + "\t" + nega_ranks[n][1] + "\t" + str(posi_ranks[n][2]) + "\t" + str(nega_ranks[n][2]) + "\t" + str(disam) + "\n")
        fileout2.write("evaluate:\t" + str(evaluate / total) + "\n")
        evaluate_all += evaluate
        total_all += total
    fileout2.write("evaluate_all:\t" + str(evaluate_all / total_all) + "\n")
...


evaluators.py

Source: evaluators.py (GitHub)


...
    dist_m = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(m, n) + \
             torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    dist_m.addmm_(1, -2, x, y.t())
    return dist_m, x.numpy(), y.numpy()

def evaluate_all(query_features, gallery_features, distmat, query=None, gallery=None,
                 query_ids=None, gallery_ids=None,
                 query_cams=None, gallery_cams=None,
                 cmc_topk=(1, 5, 10), cmc_flag=False):
    if query is not None and gallery is not None:
        query_ids = [pid for _, pid, _ in query]
        gallery_ids = [pid for _, pid, _ in gallery]
        query_cams = [cam for _, _, cam in query]
        gallery_cams = [cam for _, _, cam in gallery]
    else:
        assert (query_ids is not None and gallery_ids is not None
                and query_cams is not None and gallery_cams is not None)
    # Compute mean AP
    mAP = mean_ap(distmat, query_ids, gallery_ids, query_cams, gallery_cams)
    print('Mean AP: {:4.1%}'.format(mAP))
    if (not cmc_flag):
        return mAP
    cmc_configs = {
        'market1501': dict(separate_camera_set=False,
                           single_gallery_shot=False,
                           first_match_break=True),}
    cmc_scores = {name: cmc(distmat, query_ids, gallery_ids,
                            query_cams, gallery_cams, **params)
                  for name, params in cmc_configs.items()}
    print('CMC Scores:')
    for k in cmc_topk:
        print(' top-{:<4}{:12.1%}'.format(k, cmc_scores['market1501'][k-1]))
    return cmc_scores['market1501'], mAP

class Evaluator(object):
    def __init__(self, model):
        super(Evaluator, self).__init__()
        self.model = model

    def evaluate(self, data_loader, query, gallery, cmc_flag=False, rerank=False):
        features, _ = extract_features(self.model, data_loader)
        distmat, query_features, gallery_features = pairwise_distance(features, query, gallery)
        results = evaluate_all(query_features, gallery_features, distmat, query=query, gallery=gallery, cmc_flag=cmc_flag)
        if (not rerank):
            return results
        print('Applying person re-ranking ...')
        distmat_qq, _, _ = pairwise_distance(features, query, query)
        distmat_gg, _, _ = pairwise_distance(features, gallery, gallery)
        distmat = re_ranking(distmat.numpy(), distmat_qq.numpy(), distmat_gg.numpy())
...


Playwright tutorial

LambdaTest’s Playwright tutorial gives you a broad overview of the Playwright automation framework, its unique features, and its use cases, with examples to deepen your understanding of Playwright testing. The tutorial offers A-to-Z guidance, from installing the Playwright framework to best practices and advanced concepts.

Chapters:

  1. What is Playwright: Playwright is comparatively new but has gained solid popularity. Get to know some of Playwright's history, along with interesting facts connected with it.
  2. How To Install Playwright: Learn in detail which basic configuration and dependencies are required to install Playwright and run a test. Get step-by-step directions for installing the Playwright automation framework.
  3. Playwright Futuristic Features: Launched in 2020, Playwright quickly gained huge popularity because of obliging features such as the Playwright Test Generator and Inspector, the Playwright Reporter, and the Playwright auto-waiting mechanism. Read up on those features to master Playwright testing.
  4. What is Component Testing: Component testing in Playwright is a unique feature that allows a tester to test a single component of a web application without integrating it with other elements. Learn how to perform component testing with the Playwright automation framework.
  5. Inputs And Buttons In Playwright: Every website has input boxes and buttons; learn about testing inputs and buttons across different scenarios and examples (see the sketch after this list).
  6. Functions and Selectors in Playwright: Learn how to launch the Chromium browser with Playwright. Also gain a better understanding of important functions like “BrowserContext,” which lets you run multiple isolated browser sessions, and “newPage,” which creates a page to interact with.
  7. Handling Alerts and Dropdowns in Playwright: Playwright interacts with different types of alerts and pop-ups (simple, confirmation, and prompt) and different types of dropdowns (single-selector and multi-selector). Get hands-on with handling alerts and dropdowns in Playwright testing, as shown in the sketch after this list.
  8. Playwright vs Puppeteer: Get to know the differences between the two testing frameworks: how they differ from one another, which browsers they support, and what features they provide.
  9. Run Playwright Tests on LambdaTest: Playwright testing with LambdaTest leverages test performance to the utmost. You can run multiple Playwright tests in parallel on the LambdaTest cloud. Get a step-by-step guide to running your Playwright tests on the LambdaTest platform.
  10. Playwright Python Tutorial: The Playwright automation framework supports all major languages, such as Python, JavaScript, TypeScript, .NET, and Java. However, there are various advantages to Python end-to-end testing with Playwright because of its versatile utility. Get the hang of Playwright Python testing with this chapter.
  11. Playwright End To End Testing Tutorial: Get hands-on with Playwright end-to-end testing and learn to use features such as Trace Viewer, debugging, network interception, component testing, and visual testing.
  12. Playwright Video Tutorial: Watch video tutorials on Playwright testing from experts and get an in-depth explanation of Playwright automation testing.
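As a companion to chapters 5, 6, and 7, the sketch below strings those pieces together with Playwright Python's sync API. The URL, selectors, and option values are hypothetical placeholders; the calls themselves (new_context, fill, click, on("dialog"), select_option) are standard Playwright API.

from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    browser = p.chromium.launch()
    context = browser.new_context()  # an isolated browser session (chapter 6)
    page = context.new_page()
    page.goto("https://example.com/form")  # placeholder URL

    # Inputs and buttons (chapter 5); placeholder selectors
    page.fill("#username", "test_user")
    page.click("button[type=submit]")

    # Alerts / prompts (chapter 7): register the handler before triggering
    page.on("dialog", lambda dialog: dialog.accept())
    page.click("#show-alert")  # placeholder selector

    # Dropdowns (chapter 7): single-selector and multi-selector
    page.select_option("#color", "blue")
    page.select_option("#toppings", ["olives", "onions"])

    context.close()
    browser.close()

Registering the dialog handler before the click matters: Playwright auto-dismisses dialogs unless a handler is attached when the dialog fires.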
