How to use norm_position method in ATX

Best Python code snippet using ATX

ns_modules.py

Source:ns_modules.py Github

copy

Full Screen

import torch
import torch.nn as nn
from .base_model import BaseModel
from solvers import metrics
import torch.nn.functional as F


class MlpLayer(nn.Module):
    """Small MLP: Linear -> (BatchNorm) -> ReLU stack with a linear head.

    When ``layer_norm`` is False the module runs a shortened two-hidden-layer
    "mini" variant and every BatchNorm slot is an ``nn.Identity``.

    Args:
        input_d: width of the (flattened) input features.
        output_d: width of the output produced by ``output_fc``.
        feature_num: hidden width shared by all hidden layers.
        layer_norm: enable BatchNorm1d between Linear and ReLU; also controls
            the mini/full depth switch (see note below).
    """

    def __init__(self, input_d, output_d, feature_num=64, layer_norm=True):
        super(MlpLayer, self).__init__()
        # NOTE(review): depth (mini vs. full) is tied to the ``layer_norm``
        # flag in the original design — confirm this coupling is intentional.
        self.mini_version = not layer_norm
        self.fc1 = nn.Linear(input_d, feature_num)
        self.fc2 = nn.Linear(feature_num, feature_num)
        self.fc3 = nn.Linear(feature_num, feature_num)
        self.fc4 = nn.Linear(feature_num, feature_num)
        self.fc5 = nn.Linear(feature_num, feature_num)
        self.bn1 = nn.BatchNorm1d(num_features=feature_num) if layer_norm else nn.Identity()
        self.bn2 = nn.BatchNorm1d(num_features=feature_num) if layer_norm else nn.Identity()
        self.bn3 = nn.BatchNorm1d(num_features=feature_num) if layer_norm else nn.Identity()
        self.bn4 = nn.BatchNorm1d(num_features=feature_num) if layer_norm else nn.Identity()
        self.bn5 = nn.BatchNorm1d(num_features=feature_num) if layer_norm else nn.Identity()
        self.output_fc = nn.Linear(feature_num, output_d)

    def forward(self, x):
        x = F.relu(self.bn1(self.fc1(x)))
        x = F.relu(self.bn2(self.fc2(x)))
        if not self.mini_version:
            x = F.relu(self.bn3(self.fc3(x)))
            x = F.relu(self.bn4(self.fc4(x)))
            x = F.relu(self.bn5(self.fc5(x)))
        return self.output_fc(x)


class MLPNS(nn.Module):
    """Residual neural simulator: encodes state, force and contact points,
    fuses the features, and predicts a state delta added onto the input state.
    """

    def __init__(self, hidden_size=64, layer_norm=True):
        super(MLPNS, self).__init__()
        self.force_feature_size, self.state_feature_size, self.cp_feature_size = \
            hidden_size, hidden_size, hidden_size
        # Flattened input widths: 14-d state, 15-d force, 15-d contact points.
        self.state_tensor_dim, self.force_tensor_dim, self.cp_tensor_dim = 14, 15, 15
        self.state_encoder = MlpLayer(input_d=self.state_tensor_dim, output_d=self.state_feature_size,
                                      layer_norm=layer_norm)
        self.force_encoder = MlpLayer(input_d=self.force_tensor_dim, output_d=self.force_feature_size,
                                      layer_norm=layer_norm)
        self.cp_encoder = MlpLayer(input_d=self.cp_tensor_dim, output_d=self.cp_feature_size,
                                   layer_norm=layer_norm)
        self.force_decoder = MlpLayer(input_d=self.state_feature_size + self.force_feature_size + self.cp_feature_size,
                                      output_d=self.state_tensor_dim, layer_norm=layer_norm)

    def forward(self, state_tensor, force_tensor, contact_points):
        batch_size = force_tensor.shape[0]
        # Flatten every input to (batch, -1) before encoding.
        force_tensor = force_tensor.reshape(batch_size, -1)
        contact_points = contact_points.reshape(batch_size, -1)
        state_tensor = state_tensor.reshape(batch_size, -1)
        force_feature = self.force_encoder(force_tensor)
        state_feature = self.state_encoder(state_tensor)
        cp_feature = self.cp_encoder(contact_points)
        fused_feature = torch.cat([force_feature, state_feature, cp_feature], dim=-1)
        predict_residual_state = self.force_decoder(fused_feature)
        # Residual formulation: the decoder predicts a delta on the state.
        return state_tensor + predict_residual_state


class NSWithImageFeature(nn.Module):
    """Residual neural simulator with an additional image-feature encoder."""

    def __init__(self, hidden_size=64, layer_norm=True, image_feature_dim=225, norm_position=True):
        super(NSWithImageFeature, self).__init__()
        self.force_feature_size, self.state_feature_size, self.cp_feature_size, self.img_feature_size = \
            hidden_size, hidden_size, hidden_size, hidden_size
        # 7-d state (position + quaternion, presumably), 15-d force/contacts.
        self.state_tensor_dim, self.force_tensor_dim, self.cp_tensor_dim, self.img_feature_dim = \
            7, 15, 15, image_feature_dim
        self.state_encoder = MlpLayer(input_d=self.state_tensor_dim, output_d=self.state_feature_size,
                                      layer_norm=layer_norm)
        self.force_encoder = MlpLayer(input_d=self.force_tensor_dim, output_d=self.force_feature_size,
                                      layer_norm=layer_norm)
        self.cp_encoder = MlpLayer(input_d=self.cp_tensor_dim, output_d=self.cp_feature_size, layer_norm=layer_norm)
        self.image_encoder = MlpLayer(input_d=self.img_feature_dim, output_d=self.img_feature_size,
                                      layer_norm=layer_norm)
        total_dim_before_decode = self.state_feature_size + self.force_feature_size + self.cp_feature_size + \
            self.img_feature_size
        self.force_decoder = MlpLayer(input_d=total_dim_before_decode, output_d=self.state_tensor_dim,
                                      layer_norm=layer_norm)
        self.norm_position = norm_position

    def forward(self, state_tensor, force_tensor, contact_points, image_feature):
        batch_size = force_tensor.shape[0]
        force_tensor = force_tensor.reshape(batch_size, -1)
        contact_points = contact_points.reshape(batch_size, -1)
        state_tensor = state_tensor.reshape(batch_size, -1)
        force_feature = self.force_encoder(force_tensor)
        state_feature = self.state_encoder(state_tensor)
        cp_feature = self.cp_encoder(contact_points)
        img_feature = self.image_encoder(image_feature)
        fused_feature = torch.cat([force_feature, state_feature, cp_feature, img_feature], dim=-1)
        predict_residual_state = self.force_decoder(fused_feature)
        if self.norm_position:
            # Shrinks the position residual (first 3 state dims) by 10x —
            # presumably to match the position normalisation scale; confirm
            # against the dataset statistics.
            predict_residual_state[:, :3] /= 10
        return state_tensor + predict_residual_state


class NSLSTM(nn.Module):
    """Recurrent neural simulator: per-step feature fusion feeds a 3-layer
    LSTM whose hidden/cell state is threaded through successive calls.
    """

    def __init__(self, hidden_size=64, layer_norm=True, image_feature_dim=225, norm_position=True):
        super(NSLSTM, self).__init__()
        self.force_feature_size, self.state_feature_size, self.cp_feature_size, self.img_feature_size = \
            hidden_size, hidden_size, hidden_size, hidden_size
        self.state_tensor_dim, self.force_tensor_dim, self.cp_tensor_dim, self.img_feature_dim = \
            7, 15, 15, image_feature_dim
        self.num_layers = 3
        self.state_encoder = MlpLayer(input_d=self.state_tensor_dim, output_d=self.state_feature_size,
                                      layer_norm=layer_norm)
        self.force_encoder = MlpLayer(input_d=self.force_tensor_dim, output_d=self.force_feature_size,
                                      layer_norm=layer_norm)
        self.cp_encoder = MlpLayer(input_d=self.cp_tensor_dim, output_d=self.cp_feature_size, layer_norm=layer_norm)
        self.image_encoder = MlpLayer(input_d=self.img_feature_dim, output_d=self.img_feature_size,
                                      layer_norm=layer_norm)
        lstm_io_size = self.state_feature_size + self.force_feature_size + self.cp_feature_size + self.img_feature_size
        self.state_lstm = nn.LSTM(input_size=lstm_io_size,
                                  hidden_size=hidden_size, batch_first=True, num_layers=self.num_layers)
        self.state_decoder = MlpLayer(input_d=hidden_size, output_d=self.state_tensor_dim, layer_norm=layer_norm)
        self.norm_position = norm_position

    def forward(self, state_tensor, force_tensor, contact_points, image_feature, last_hidden, last_cell):
        batch_size = force_tensor.shape[0]
        force_tensor = force_tensor.reshape(batch_size, -1)
        contact_points = contact_points.reshape(batch_size, -1)
        state_tensor = state_tensor.reshape(batch_size, -1)
        force_feature = self.force_encoder(force_tensor)
        state_feature = self.state_encoder(state_tensor)
        cp_feature = self.cp_encoder(contact_points)
        img_feature = self.image_encoder(image_feature)
        # BUGFIX: the original used ``unsqueeze(0)`` which, combined with
        # ``batch_first=True``, made the LSTM see (batch=1, seq=batch_size, F)
        # — collapsing the per-sample recurrent state and turning the later
        # ``squeeze(1)`` into a no-op.  Each call is one time step per sample,
        # i.e. input shape (batch, seq=1, feature).
        fused_input_feature = torch.cat([force_feature, state_feature, cp_feature, img_feature],
                                        dim=-1).unsqueeze(1)
        if last_hidden is None or last_cell is None:
            # First step of a rollout: let the LSTM initialise zero states.
            output_state_feature, (last_hidden, last_cell) = self.state_lstm(fused_input_feature)
        else:
            output_state_feature, (last_hidden, last_cell) = self.state_lstm(
                fused_input_feature, (last_hidden, last_cell))
        output_state_feature = output_state_feature.squeeze(1)
        predicted_state = self.state_decoder(output_state_feature)
        if self.norm_position:
            # See NSWithImageFeature.forward: 10x shrink of position residual.
            predicted_state[:, :3] /= 10
        return state_tensor + predicted_state, last_hidden, last_cell


def get_denorm_state_tensor(state_ten, stat):
    """De-normalise a (batch, state_dim) tensor in place with dataset stats.

    Mutates and returns ``state_ten``; callers pass ``.clone()`` when the
    normalised original must survive.  Dims [:3] are position; dims [7:10]
    velocity and [10:] angular velocity when velocity stats are present.
    NOTE(review): rotation stats are unpacked but unused — presumably dims
    [3:7] are a unit quaternion that needs no de-normalisation; confirm.
    """
    pos_mean, pos_std, rot_mean, rot_std = \
        stat['position_mean'], stat['position_std'], stat['rotation_mean'], stat['rotation_std']
    state_ten[:, :3] = state_ten[:, :3] * pos_std + pos_mean
    if 'velocity_mean' in stat:
        vel_mean, vel_std, omg_mean, omg_std = \
            stat['velocity_mean'], stat['velocity_std'], stat['omega_mean'], stat['omega_std']
        state_ten[:, 7:10] = state_ten[:, 7:10] * vel_std + vel_mean
        state_ten[:, 10:] = state_ten[:, 10:] * omg_std + omg_mean
    return state_ten


class NSBaseModel(BaseModel):
    """Baseline encode-fuse-decode state predictor wired into the project's
    BaseModel training harness."""
    # deprecating
    metric = [metrics.StateMetric]

    def __init__(self, args):
        super(NSBaseModel, self).__init__(args)
        self.loss_function = args.loss
        self.force_feature_size, self.state_feature_size, self.cp_feature_size = 64, 64, 64
        self.state_tensor_dim, self.force_tensor_dim, self.cp_tensor_dim = 7, 15, 15
        self.state_encoder = MlpLayer(input_d=self.state_tensor_dim, output_d=self.state_feature_size)
        self.force_encoder = MlpLayer(input_d=self.force_tensor_dim, output_d=self.force_feature_size)
        self.cp_encoder = MlpLayer(input_d=self.cp_tensor_dim, output_d=self.cp_feature_size)
        self.force_decoder = MlpLayer(input_d=self.state_feature_size + self.force_feature_size + self.cp_feature_size,
                                      output_d=self.state_tensor_dim)
        self.number_of_cp = args.number_of_cp
        self.sequence_length = args.sequence_length
        self.gpu_ids = args.gpu_ids
        # When True, the decoder output is treated as a residual on the state.
        self.residual = args.residual

    def loss(self, args):
        return self.loss_function(args)

    def forward(self, input_d, target_d):
        """Predict the next state and attach de-normalised tensors to both the
        output and (mutated) target dictionaries."""
        forces, contact_points, state_tensor = input_d['norm_force'], input_d['norm_contact_points'], \
            input_d['norm_state_tensor']
        batch_size = forces.shape[0]
        forces = forces.reshape(batch_size, -1)
        contact_points = contact_points.reshape(batch_size, -1)
        state_tensor = state_tensor.reshape(batch_size, -1)
        force_feature = self.force_encoder(forces)
        state_feature = self.state_encoder(state_tensor)
        cp_feature = self.cp_encoder(contact_points)
        fused_feature = torch.cat([force_feature, state_feature, cp_feature], dim=-1)
        predict_state = self.force_decoder(fused_feature)
        if self.residual:
            predict_state = state_tensor + predict_state
        # Statistics arrive with a leading batch axis; squeeze to plain stats.
        sta = {one_key: target_d['statistics'][one_key].squeeze() for one_key in target_d['statistics']}
        target_d['norm_state_tensor'] = target_d['norm_state_tensor'].reshape(batch_size, -1)
        target_d['denorm_state_tensor'] = get_denorm_state_tensor(state_ten=target_d['norm_state_tensor'].clone(),
                                                                  stat=sta)
        target_d['denorm_input_state'] = get_denorm_state_tensor(state_ten=state_tensor.clone().detach(), stat=sta)
        denorm_predict_state = get_denorm_state_tensor(state_ten=predict_state.clone(), stat=sta)
        output_d = {
            'norm_state_tensor': predict_state,
            'denorm_state_tensor': denorm_predict_state,
        }
        return output_d, target_d

    def optimizer(self):
        # NOTE(review): this method's body is truncated in the captured
        # source; restore it from the original repository.
        ...

Full Screen

Full Screen

encoder.py

Source:encoder.py Github

copy

Full Screen

#!/usr/bin/env python
# coding: utf-8
# Author: Vladimir M. Zaytsev <zaytsev@usc.edu>
"""
This module contains implementations of various routines for
encoding and decoding raw data into feature vectors.
"""

# Sentinel pseudo-tokens for sentence boundaries and out-of-vocabulary words.
S_BEGIN = "<<BEGIN>>"  # chr(244)
S_END = "<<END>>"  # chr(243)
U_WORD = "<<UWORD>>"  # chr(242)
# Separator between a word and its tag in tagged input ("word_TAG").
WORD_TAG_SEP = "_"


class Token(object):
    """A single token plus the surface features used for feature encoding."""

    def __init__(self, tag_prev, tag, word, sent_length, position, token_prev, unknown=False):
        self.tag = tag
        self.tag_prev = tag_prev
        self.raw = word
        self.token_prev = token_prev
        if word is not None:
            self.title = word.istitle()
            self.upper = word.isupper()
            self.form = word.lower()
            self.lemma = word.lower()
            self.word_len = len(word)
        else:
            # Boundary tokens (e.g. the S_END sentinel) carry no surface form.
            self.title = False
            self.upper = False
            self.form = None
            self.lemma = None
            self.word_len = 0
        if unknown:
            # Collapse every out-of-vocabulary word onto one sentinel form.
            self.form = U_WORD
            self.lemma = U_WORD
        self.position = position
        self.sent_len = sent_length
        # NOTE(review): despite the name this stores the raw position, not
        # position / sent_length — confirm whether normalisation was intended.
        self.norm_position = position

    def __repr__(self):
        return "<Token(%s, %s, %s, pos=%.3f)>" % (self.tag_prev,
                                                  self.tag,
                                                  self.form,
                                                  self.norm_position)


def tag(tagged_word):
    """Return the tag of a (word, tag) pair."""
    return tagged_word[1]


def word(tagged_word):
    """Return the word of a (word, tag) pair."""
    return tagged_word[0]


def bigrams(sequence):
    """Return consecutive pairs of *sequence* as a list of tuples."""
    # BUGFIX: the original used the Python-2-only ``xrange``; zip over the
    # sequence and its 1-shifted slice yields the same pairs on Python 2 & 3.
    return list(zip(sequence, sequence[1:]))


def words(tagged_words):
    """Return the word column of a list of (word, tag) pairs."""
    return [tw[0] for tw in tagged_words]


def tags(tagged_words):
    """Return the tag column of a list of (word, tag) pairs."""
    return [tw[1] for tw in tagged_words]


class InputTransformer(object):
    """Annotates tagged/raw sentences and accumulates word/tag vocabularies."""

    def __init__(self):
        self.words = set()
        self.tags = set()

    def annotate(self, input_stream, tagged=True):
        """Turn each whitespace-tokenised line of *input_stream* into a list
        of Token objects (terminated by an S_END sentinel).

        With ``tagged=True`` each item must be ``word_TAG`` and the observed
        forms/tags are added to the vocabularies; with ``tagged=False`` words
        absent from ``self.words`` are marked unknown.
        """
        line_ctr = 0
        annotated = []
        for line in input_stream:
            line_ctr += 1
            # BUGFIX: was a Python-2 ``print`` statement; the call form is
            # valid on both 2 and 3.  (Progress/debug output.)
            print(line_ctr)
            tagged_words = line.split()
            sent_len = len(tagged_words)
            tag_prev = S_BEGIN
            token_prev = None
            tokens = []
            for i, word_tag in enumerate(tagged_words):
                if tagged:
                    # Renamed locals: ``word``/``tag`` shadowed the
                    # module-level helper functions of the same names.
                    w, t = word_tag.split(WORD_TAG_SEP)
                    token = Token(tag_prev, t, w, sent_len, i, token_prev)
                    tag_prev = t
                    self.words.add(token.form)
                    self.tags.add(token.tag)
                else:
                    w = word_tag
                    if w.lower() in self.words:
                        token = Token(tag_prev, None, w, sent_len, i, token_prev)
                    else:
                        token = Token(tag_prev, None, w, sent_len, i, token_prev, unknown=True)
                tokens.append(token)
                token_prev = token
            tokens.append(Token(tag_prev, S_END, None, sent_len, sent_len, token_prev))
            annotated.append(tokens)
        # NOTE(review): the captured source is truncated here; the method
        # presumably ends with ``return annotated`` — restore from the
        # original repository before relying on a return value.

Full Screen

Full Screen

path_weighting.py

Source:path_weighting.py Github

copy

Full Screen

#!/usr/bin/env python3
"""Nicolas Gampierakis (2019). Scintillometer path weighting and
effective height calculator from path position and normalised height.
"""
import math

import numpy as np
import pandas as pd
import scipy as sp
from scipy import integrate, special

# COMPAT: ``scipy.integrate.trapz`` was removed in SciPy 1.14; prefer the
# ``trapezoid`` name (available since SciPy 1.6) and fall back when absent.
try:
    _trapz = sp.integrate.trapezoid
except AttributeError:  # very old SciPy
    _trapz = sp.integrate.trapz


def bessel_second(x):
    """Calculates the bessel function for the specific path position.

    Args:
        x (float): path position, m

    Returns:
        y (float): bessel function of path point
    """
    bessel_variable = 2.283 * math.pi * (x - 0.5)
    if bessel_variable == 0:
        # limit of 2*J1(t)/t as t -> 0 is exactly 1
        return 1
    return 2 * sp.special.jv(1, bessel_variable) / bessel_variable


def pwf(path_position):
    """Path weighting function for effective path height.

    Args:
        path_position (Series): input data array

    Returns:
        weight (list): weight at each path position
    """
    return [2.163 * bessel_second(position) for position in path_position]


def effective_z(path_height, path_position, b):
    """Calculates the effective path height across the entire
    scintillometer path based on actual path height and position.

    Args:
        path_height (Series): actual path height, m
        path_position (Series): the position along the
            scintillometer path
        b (str): stability conditions, either stable, unstable, or no
            height dependency

    Returns:
        z_eff (float): effective path height, m
    """
    if b.lower() in ["stable", "s"]:
        b = -2 / 3
    elif b.lower() in ["unstable", "u"]:
        b = -4 / 3
    else:
        b = 1
        print("No height dependency selected")
    # Hoisted: the original evaluated pwf(path_position) twice.
    weights = pwf(path_position)
    ph_list = np.multiply(path_height ** b, weights)
    # For path-building intersections
    ph_list[np.isnan(ph_list)] = 0
    z_eff = (_trapz(ph_list) / _trapz(weights)) ** (1 / b)
    return z_eff


def return_z_effective(location_name):
    """Loads path-height data for a named location and reports its mean and
    effective path height (stability conditions are read interactively)."""
    if location_name.lower() == "h":
        location_name = "hungerburg"
    elif location_name.lower() in ["s", "schießstand"]:
        location_name = "schiessstand"
    path_string = "../MATLAB/path_height_" + location_name.lower() + ".csv"
    path_height_data = pd.read_csv(
        path_string, header=None, names=["path_height", "norm_position"]
    )
    stability_factor = input(
        "Please enter the stability conditions (stable, " "unstable, or other):\n"
    )
    effective_path_height = effective_z(
        path_height_data["path_height"],
        path_height_data["norm_position"],
        stability_factor,
    )
    mean_path_height = np.mean(path_height_data["path_height"])
    path_weight = pwf(path_height_data["norm_position"])
    path_height_data["path_weight"] = path_weight
    print("Mean path height: " + str(mean_path_height) + "m")
    print("Effective path height: " + str(effective_path_height) + "m")
    return effective_path_height
    # For Matlab export
    # path_height_data.to_csv(path_or_buf="../MATLAB/hungerburg_sim.csv",

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you be proficient with different test automation frameworks, i.e., Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run ATX automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful