How to use the ops method in pytest-benchmark

Best Python code snippets using pytest-benchmark
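None of the snippets below actually invokes pytest-benchmark itself, so here is a minimal sketch of where the ops figure comes from: the benchmark fixture times a callable, and the OPS column in the report is the reciprocal of the mean run time. The fibonacci function and the test name are illustrative placeholders, and the stats attribute path mentioned in the comments is an assumption that can differ between versions.

def fibonacci(n):
    # deliberately slow recursive implementation, just to give the timer something to measure
    return n if n < 2 else fibonacci(n - 1) + fibonacci(n - 2)

def test_fibonacci_ops(benchmark):
    # the benchmark fixture calls fibonacci(15) repeatedly and records the timings
    result = benchmark(fibonacci, 15)
    # benchmark() returns the wrapped function's result, so normal assertions still work
    assert result == 610
    # recent releases also expose the computed statistics on the fixture,
    # e.g. benchmark.stats.stats.ops (operations per second); the exact
    # attribute path may vary between versions

Running this with pytest prints a results table per test; with recent pytest-benchmark releases you can choose the reported columns explicitly, for example pytest --benchmark-columns=min,max,mean,ops.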

inception_model.py

Source: inception_model.py (GitHub)


# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception-v3 expressed in TensorFlow-Slim.
  Usage:
  # Parameters for BatchNorm.
  batch_norm_params = {
      # Decay for the batch_norm moving averages.
      'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
  }
  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
    with slim.arg_scope([slim.ops.conv2d],
                        stddev=0.1,
                        activation=tf.nn.relu,
                        batch_norm_params=batch_norm_params):
      # Force all Variables to reside on the CPU.
      with slim.arg_scope([slim.variables.variable], device='/cpu:0'):
        logits, endpoints = slim.inception.inception_v3(
            images,
            dropout_keep_prob=0.8,
            num_classes=num_classes,
            is_training=for_training,
            restore_logits=restore_logits,
            scope=scope)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception.slim import ops
from inception.slim import scopes
def inception_v3(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=1000,
                 is_training=True,
                 restore_logits=True,
                 scope=''):
  """Latest Inception from http://arxiv.org/abs/1512.00567.
    "Rethinking the Inception Architecture for Computer Vision"
    Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
    Zbigniew Wojna
  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for name_scope.
  Returns:
    a list containing 'logits', 'aux_logits' Tensors.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  with tf.name_scope(scope, 'inception_v3', [inputs]):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                          is_training=is_training):
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='VALID'):
        # 299 x 299 x 3
        end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2,
                                         scope='conv0')
        # 149 x 149 x 32
        end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3],
                                         scope='conv1')
        # 147 x 147 x 32
        end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3],
                                         padding='SAME', scope='conv2')
        # 147 x 147 x 64
        end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
                                           stride=2, scope='pool1')
        # 73 x 73 x 64
        end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
                                         scope='conv3')
        # 73 x 73 x 80.
        end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
                                         scope='conv4')
        # 71 x 71 x 192.
        end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
                                           stride=2, scope='pool2')
        # 35 x 35 x 192.
        net = end_points['pool2']
      # Inception blocks
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='SAME'):
        # mixed: 35 x 35 x 256.
        with tf.variable_scope('mixed_35x35x256a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
          net = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
          end_points['mixed_35x35x256a'] = net
        # mixed_1: 35 x 35 x 288.
        with tf.variable_scope('mixed_35x35x288a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
          net = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
          end_points['mixed_35x35x288a'] = net
        # mixed_2: 35 x 35 x 288.
        with tf.variable_scope('mixed_35x35x288b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
          net = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
          end_points['mixed_35x35x288b'] = net
        # mixed_3: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768a'):
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID')
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3],
                                      stride=2, padding='VALID')
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
          net = tf.concat([branch3x3, branch3x3dbl, branch_pool], 3)
          end_points['mixed_17x17x768a'] = net
        # mixed4: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 128, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 128, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
          end_points['mixed_17x17x768b'] = net
        # mixed_5: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768c'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 160, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
          end_points['mixed_17x17x768c'] = net
        # mixed_6: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768d'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 160, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
          end_points['mixed_17x17x768d'] = net
        # mixed_7: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768e'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 192, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 192, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
          end_points['mixed_17x17x768e'] = net
        # Auxiliary Head logits
        aux_logits = tf.identity(end_points['mixed_17x17x768e'])
        with tf.variable_scope('aux_logits'):
          aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3,
                                    padding='VALID')
          aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj')
          # Shape of feature map before the final layer.
          shape = aux_logits.get_shape()
          aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01,
                                  padding='VALID')
          aux_logits = ops.flatten(aux_logits)
          aux_logits = ops.fc(aux_logits, num_classes, activation=None,
                              stddev=0.001, restore=restore_logits)
          end_points['aux_logits'] = aux_logits
        # mixed_8: 8 x 8 x 1280.
        # Note that the scope below is not changed to not void previous
        # checkpoints.
        # (TODO) Fix the scope when appropriate.
        with tf.variable_scope('mixed_17x17x1280a'):
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 192, [1, 1])
            branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2,
                                   padding='VALID')
          with tf.variable_scope('branch7x7x3'):
            branch7x7x3 = ops.conv2d(net, 192, [1, 1])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3],
                                     stride=2, padding='VALID')
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
          net = tf.concat([branch3x3, branch7x7x3, branch_pool], 3)
          end_points['mixed_17x17x1280a'] = net
        # mixed_9: 8 x 8 x 2048.
        with tf.variable_scope('mixed_8x8x2048a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 320, [1, 1])
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat([ops.conv2d(branch3x3, 384, [1, 3]),
                                   ops.conv2d(branch3x3, 384, [3, 1])], 3)
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
            branch3x3dbl = tf.concat([ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                      ops.conv2d(branch3x3dbl, 384, [3, 1])], 3)
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch3x3, branch3x3dbl, branch_pool], 3)
          end_points['mixed_8x8x2048a'] = net
        # mixed_10: 8 x 8 x 2048.
        with tf.variable_scope('mixed_8x8x2048b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 320, [1, 1])
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat([ops.conv2d(branch3x3, 384, [1, 3]),
                                   ops.conv2d(branch3x3, 384, [3, 1])], 3)
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
            branch3x3dbl = tf.concat([ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                      ops.conv2d(branch3x3dbl, 384, [3, 1])], 3)
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch3x3, branch3x3dbl, branch_pool], 3)
          end_points['mixed_8x8x2048b'] = net
        # Final pooling and prediction
        with tf.variable_scope('logits'):
          shape = net.get_shape()
          net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
          # 1 x 1 x 2048
          net = ops.dropout(net, dropout_keep_prob, scope='dropout')
          net = ops.flatten(net, scope='flatten')
          # 2048
          logits = ops.fc(net, num_classes, activation=None, scope='logits',
                          restore=restore_logits)
          # 1000
          end_points['logits'] = logits
          end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
        return logits, end_points
def inception_v3_parameters(weight_decay=0.00004, stddev=0.1,
                            batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
  """Yields the scope with the default parameters for inception_v3.
  Args:
    weight_decay: the weight decay for weights variables.
    stddev: standard deviation of the truncated guassian weight distribution.
    batch_norm_decay: decay for the moving average of batch_norm momentums.
    batch_norm_epsilon: small float added to variance to avoid dividing by zero.
  Yields:
    a arg_scope with the parameters needed for inception_v3.
  """
  # Set weight_decay for weights in Conv and FC layers.
  with scopes.arg_scope([ops.conv2d, ops.fc],
                        weight_decay=weight_decay):
    # Set stddev, activation and parameters for batch_norm.
    with scopes.arg_scope([ops.conv2d],
                          stddev=stddev,
                          activation=tf.nn.relu,
                          batch_norm_params={
                              'decay': batch_norm_decay,
                              'epsilon': batch_norm_epsilon}) as arg_scope:
      ...


gen_stimuli.py

Source: gen_stimuli.py (GitHub)


#!/usr/bin/env python-2.5
import sys
import random
# Copyright 2017 ETH Zurich and University of Bologna.
# Copyright and related rights are licensed under the Solderpad Hardware
# License, Version 0.51 (the License); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://solderpad.org/licenses/SHL-0.51. Unless required by applicable law
# or agreed to in writing, software, hardware and materials distributed under
# this License is distributed on an AS IS BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
def write_hex8_arr(f, name, arr):
    f.write('unsigned int %s[] = {\n' % name)
    for v in arr:
        f.write('0x%02X%02X%02X%02X,\n' % (v[0] & 0xFF, v[1] & 0xFF, v[2] & 0xFF, v[3] & 0xFF))
    f.write('};\n\n')
    return
def write_hex16_arr(f, name, arr):
    f.write('unsigned int %s[] = {\n' % name)
    for v in arr:
        f.write('0x%04X%04X,\n' % (v[0] & 0xFFFF, v[1] & 0xFFFF))
    f.write('};\n\n')
    return
def write_hex32_arr(f, name, arr):
    f.write('unsigned int %s[] = {\n' % name)
    for v in arr:
        f.write('0x%08X,\n' % (v & 0xFFFFFFFF))
    f.write('};\n\n')
    return
f = open('testALU_stimuli.h', 'w')
# number of test cases to generate
N = 15
################################################################################
################################################################################
#
# ADDITION
#
################################################################################
################################################################################
################################################################################
# generate testdata for l.add
################################################################################
ops_a = []
ops_b = []
exp_res = []
for i in range(N):
    a = random.randint(-2**31, 2**31-1)
    b = random.randint(-2**31, 2**31-1)
    r = a + b
    ops_a.append(a)
    ops_b.append(b)
    exp_res.append(r)
write_hex32_arr(f, 'g_add_a', ops_a)
write_hex32_arr(f, 'g_add_b', ops_b)
write_hex32_arr(f, 'g_add_act', ops_a)
write_hex32_arr(f, 'g_add_exp', exp_res)
################################################################################
# generate testdata for l.addi
################################################################################
ops_a = []
exp_res = []
for i in range(N):
    a = random.randint(-2**31, 2**31-1)
    r = a + 100
    ops_a.append(a)
    exp_res.append(r)
write_hex32_arr(f, 'g_addi_a', ops_a)
write_hex32_arr(f, 'g_addi_act', ops_a)
write_hex32_arr(f, 'g_addi_exp', exp_res)
################################################################################
################################################################################
#
# SUBTRACTION
#
################################################################################
################################################################################
################################################################################
# generate testdata for l.sub
################################################################################
ops_a = []
ops_b = []
exp_res = []
for i in range(N):
    a = random.randint(-2**31, 2**31-1)
    b = random.randint(-2**31, 2**31-1)
    r = a - b
    ops_a.append(a)
    ops_b.append(b)
    exp_res.append(r)
write_hex32_arr(f, 'g_sub_a', ops_a)
write_hex32_arr(f, 'g_sub_b', ops_b)
write_hex32_arr(f, 'g_sub_act', ops_a)
write_hex32_arr(f, 'g_sub_exp', exp_res)
################################################################################
################################################################################
#
# LOGIC STUFF
#
################################################################################
################################################################################
################################################################################
# generate testdata for l.and
################################################################################
ops_a = []
ops_b = []
exp_res = []
for i in range(N):
    a = random.randint(-2**31, 2**31-1)
    b = random.randint(-2**31, 2**31-1)
    r = a & b
    ops_a.append(a)
    ops_b.append(b)
    exp_res.append(r)
write_hex32_arr(f, 'g_and_a', ops_a)
write_hex32_arr(f, 'g_and_b', ops_b)
write_hex32_arr(f, 'g_and_act', ops_a)
write_hex32_arr(f, 'g_and_exp', exp_res)
################################################################################
# generate testdata for l.or
################################################################################
ops_a = []
ops_b = []
exp_res = []
for i in range(N):
    a = random.randint(-2**31, 2**31-1)
    b = random.randint(-2**31, 2**31-1)
    r = a | b
    ops_a.append(a)
    ops_b.append(b)
    exp_res.append(r)
write_hex32_arr(f, 'g_or_a', ops_a)
write_hex32_arr(f, 'g_or_b', ops_b)
write_hex32_arr(f, 'g_or_act', ops_a)
write_hex32_arr(f, 'g_or_exp', exp_res)
################################################################################
# generate testdata for l.xor
################################################################################
ops_a = []
ops_b = []
exp_res = []
for i in range(N):
    a = random.randint(-2**31, 2**31-1)
    b = random.randint(-2**31, 2**31-1)
    r = a ^ b
    ops_a.append(a)
    ops_b.append(b)
    exp_res.append(r)
write_hex32_arr(f, 'g_xor_a', ops_a)
write_hex32_arr(f, 'g_xor_b', ops_b)
write_hex32_arr(f, 'g_xor_act', ops_a)
write_hex32_arr(f, 'g_xor_exp', exp_res)
################################################################################
################################################################################
#
# Shifting
#
################################################################################
################################################################################
################################################################################
# generate testdata for l.sll
################################################################################
ops_a = []
ops_b = []
exp_res = []
for i in range(N):
    a = random.randint(-2**31, 2**31-1)
    b = random.randint(0, 31)
    r = a << b
    ops_a.append(a)
    ops_b.append(b)
    exp_res.append(r)
write_hex32_arr(f, 'g_sll_a', ops_a)
write_hex32_arr(f, 'g_sll_b', ops_b)
write_hex32_arr(f, 'g_sll_act', ops_a)
write_hex32_arr(f, 'g_sll_exp', exp_res)
################################################################################
# generate testdata for l.srl
################################################################################
ops_a = []
ops_b = []
exp_res = []
for i in range(N):
    a = random.randint(-2**31, 2**31-1)
    b = random.randint(0, 31)
    # python does not provide logical right shifts, so we have to hack
    # something together
    r = (a % 0x100000000) >> b
    ops_a.append(a)
    ops_b.append(b)
    exp_res.append(r)
write_hex32_arr(f, 'g_srl_a', ops_a)
write_hex32_arr(f, 'g_srl_b', ops_b)
write_hex32_arr(f, 'g_srl_act', ops_a)
write_hex32_arr(f, 'g_srl_exp', exp_res)
################################################################################
# generate testdata for l.sra
################################################################################
ops_a = []
ops_b = []
exp_res = []
for i in range(N):
    a = random.randint(-2**31, 2**31-1)
    b = random.randint(0, 31)
    r = a >> b
    ops_a.append(a)
    ops_b.append(b)
    exp_res.append(r)
write_hex32_arr(f, 'g_sra_a', ops_a)
write_hex32_arr(f, 'g_sra_b', ops_b)
write_hex32_arr(f, 'g_sra_act', ops_a)
write_hex32_arr(f, 'g_sra_exp', exp_res)
################################################################################
# generate testdata for l.slli
################################################################################
ops_a = []
exp_res = []
for i in range(N):
    a = random.randint(-2**31, 2**31-1)
    r = a << 10
    ops_a.append(a)
    exp_res.append(r)
write_hex32_arr(f, 'g_slli_a', ops_a)
write_hex32_arr(f, 'g_slli_act', ops_a)
write_hex32_arr(f, 'g_slli_exp', exp_res)
################################################################################
# generate testdata for l.srli
################################################################################
ops_a = []
exp_res = []
for i in range(N):
    a = random.randint(-2**31, 2**31-1)
    # python does not provide logical right shifts, so we have to hack
    # something together
    r = (a % 0x100000000) >> 9
    ops_a.append(a)
    exp_res.append(r)
write_hex32_arr(f, 'g_srli_a', ops_a)
write_hex32_arr(f, 'g_srli_act', ops_a)
write_hex32_arr(f, 'g_srli_exp', exp_res)
################################################################################
# generate testdata for l.srai
################################################################################
ops_a = []
exp_res = []
for i in range(N):
    a = random.randint(-2**31, 2**31-1)
    r = a >> 21
    ops_a.append(a)
    exp_res.append(r)
write_hex32_arr(f, 'g_srai_a', ops_a)
write_hex32_arr(f, 'g_srai_act', ops_a)
write_hex32_arr(f, 'g_srai_exp', exp_res)
# ################################################################################
# # generate testdata for l.cmov
# ################################################################################
# ops_a = []
# ops_b = []
# exp_res = []
#
# for i in range(N):
#     a = random.randint(0, 2**32-1)
#     b = random.randint(0, 2**32-1)
#
#     if(a > b):
#         r = a
#     else:
#         r = b
#
#     ops_a.append(a)
#     ops_b.append(b)
#     exp_res.append(r)
#
# write_hex32_arr(f, 'g_cmov_a', ops_a)
# write_hex32_arr(f, 'g_cmov_b', ops_b)
# write_hex32_arr(f, 'g_cmov_act', ops_a)
...
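One detail worth calling out in the snippet above: Python's >> operator performs an arithmetic right shift, so shifting a negative int keeps the sign bit set. To emulate l.srl's logical shift, the script first maps the signed value onto its unsigned 32-bit representation with a % 0x100000000 before shifting. A small illustration with an arbitrary value:

a = -8                          # bit pattern 0xFFFFFFF8 in 32-bit two's complement
print(a >> 1)                   # -4: arithmetic shift preserves the sign
print((a % 0x100000000) >> 1)   # 2147483644 (0x7FFFFFFC): logical shift of the 32-bit pattern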


count_hooks.py

Source: count_hooks.py (GitHub)


import argparse
import torch
import torch.nn as nn
multiply_adds = 1
def count_convNd(m, x, y):
    x = x[0]
    cin = m.in_channels
    batch_size = x.size(0)
    kernel_ops = m.weight.size()[2:].numel()
    bias_ops = 1 if m.bias is not None else 0
    ops_per_element = kernel_ops + bias_ops
    output_elements = y.nelement()
    # cout x oW x oH
    total_ops = batch_size * cin * output_elements * ops_per_element // m.groups
    # total_ops = batch_size * output_elements * (cin * kernel_ops // m.groups + bias_ops)
    m.total_ops = torch.Tensor([int(total_ops)])
def count_conv2d(m, x, y):
    x = x[0]
    cin = m.in_channels
    cout = m.out_channels
    kh, kw = m.kernel_size
    batch_size = x.size()[0]
    out_h = y.size(2)
    out_w = y.size(3)
    # ops per output element
    # kernel_mul = kh * kw * cin
    # kernel_add = kh * kw * cin - 1
    kernel_ops = multiply_adds * kh * kw
    bias_ops = 1 if m.bias is not None else 0
    ops_per_element = kernel_ops + bias_ops
    # total ops
    # num_out_elements = y.numel()
    output_elements = batch_size * out_w * out_h * cout
    total_ops = output_elements * ops_per_element * cin // m.groups
    m.total_ops = torch.Tensor([int(total_ops)])
def count_convtranspose2d(m, x, y):
    x = x[0]
    cin = m.in_channels
    cout = m.out_channels
    kh, kw = m.kernel_size
    batch_size = x.size()[0]
    out_h = y.size(2)
    out_w = y.size(3)
    # ops per output element
    # kernel_mul = kh * kw * cin
    # kernel_add = kh * kw * cin - 1
    kernel_ops = multiply_adds * kh * kw * cin // m.groups
    bias_ops = 1 if m.bias is not None else 0
    ops_per_element = kernel_ops + bias_ops
    # total ops
    # num_out_elements = y.numel()
    # output_elements = batch_size * out_w * out_h * cout
    ops_per_element = m.weight.nelement()
    output_elements = y.nelement()
    total_ops = output_elements * ops_per_element
    m.total_ops = torch.Tensor([int(total_ops)])
def count_bn(m, x, y):
    x = x[0]
    nelements = x.numel()
    # subtract, divide, gamma, beta
    total_ops = 4 * nelements
    m.total_ops = torch.Tensor([int(total_ops)])
def count_relu(m, x, y):
    x = x[0]
    nelements = x.numel()
    total_ops = nelements
    m.total_ops = torch.Tensor([int(total_ops)])
def count_sigmoid(m, x, y):
    x = x[0]
    nelements = x.numel()
    total_exp = nelements
    total_add = nelements
    total_div = nelements
    total_ops = total_exp + total_add + total_div
    m.total_ops = torch.Tensor([int(total_ops)])
def count_pixelshuffle(m, x, y):
    x = x[0]
    nelements = x.numel()
    total_ops = nelements
    m.total_ops = torch.Tensor([int(total_ops)])
def count_softmax(m, x, y):
    x = x[0]
    batch_size, nfeatures = x.size()
    total_exp = nfeatures
    total_add = nfeatures - 1
    total_div = nfeatures
    total_ops = batch_size * (total_exp + total_add + total_div)
    m.total_ops = torch.Tensor([int(total_ops)])
def count_maxpool(m, x, y):
    kernel_ops = torch.prod(torch.Tensor([m.kernel_size]))
    num_elements = y.numel()
    total_ops = kernel_ops * num_elements
    m.total_ops = torch.Tensor([int(total_ops)])
def count_adap_maxpool(m, x, y):
    kernel = torch.Tensor([*(x[0].shape[2:])]) // torch.Tensor(list((m.output_size,))).squeeze()
    kernel_ops = torch.prod(kernel)
    num_elements = y.numel()
    total_ops = kernel_ops * num_elements
    m.total_ops = torch.Tensor([int(total_ops)])
def count_avgpool(m, x, y):
    total_add = torch.prod(torch.Tensor([m.kernel_size]))
    total_div = 1
    kernel_ops = total_add + total_div
    num_elements = y.numel()
    total_ops = kernel_ops * num_elements
    m.total_ops = torch.Tensor([int(total_ops)])
def count_adap_avgpool(m, x, y):
    kernel = torch.Tensor([*(x[0].shape[2:])]) // torch.Tensor(list((m.output_size,))).squeeze()
    total_add = torch.prod(kernel)
    total_div = 1
    kernel_ops = total_add + total_div
    num_elements = y.numel()
    total_ops = kernel_ops * num_elements
    m.total_ops = torch.Tensor([int(total_ops)])
def count_linear(m, x, y):
    # per output element
    total_mul = m.in_features
    total_add = m.in_features - 1
    num_elements = y.numel()
    total_ops = (total_mul + total_add) * num_elements
    ...
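The counting functions above follow PyTorch's forward-hook signature (module, input tuple, output) and stash a one-element total_ops tensor on the module they are attached to. A hypothetical usage sketch, assuming count_conv2d and count_relu from the snippet are in scope; the toy model and the add_hooks helper are illustrative only:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU())

def add_hooks(m):
    # attach the matching counter to each leaf module
    if isinstance(m, nn.Conv2d):
        m.register_forward_hook(count_conv2d)
    elif isinstance(m, nn.ReLU):
        m.register_forward_hook(count_relu)

model.apply(add_hooks)
model(torch.randn(1, 3, 32, 32))  # one forward pass populates total_ops on each hooked module
total_ops = sum(m.total_ops.item() for m in model.modules() if hasattr(m, 'total_ops'))
print(total_ops)  # multiply-add style op count for this input size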


config.js

Source: config.js (GitHub)


/**
 * Build widget configuration
 */
(function () {
    'use strict';
    angular.module('devops-dashboard').controller('ChatOpsConfigController',
        ChatOpsConfigController);
    ChatOpsConfigController.$inject = ['modalData', '$modalInstance',
        'collectorData'];
    function ChatOpsConfigController(modalData, $modalInstance, collectorData) {
        var ctrl = this;
        var widgetConfig = modalData.widgetConfig;
        ctrl.chatOpsOptions = [{
            name: 'HipChat',
            value: 'HipChat'
        }, {
            name: 'Slack',
            value: 'Slack (Not implemented)'
        }, {
            name: 'Gitter',
            value: 'Gitter (Not implemented)'
        }];
        if (!widgetConfig.options.chatOpsOption) {
            ctrl.chatOpsOption = "";
        }
        else {
            var myindex;
            for (var v = 0; v < ctrl.chatOpsOptions.length; v++) {
                if (ctrl.chatOpsOptions[v].name == widgetConfig.options.chatOpsOption.name) {
                    myindex = v;
                    break;
                }
            }
            ctrl.chatOpsOption = ctrl.chatOpsOptions[myindex];
        }
        ctrl.chatOpsRoomName = widgetConfig.options.chatOpsRoomName;
        ctrl.chatOpsRoomAuthToken = widgetConfig.options.chatOpsRoomAuthToken;
        ctrl.chatOpsServerUrl = widgetConfig.options.chatOpsServerUrl;
        // public variables
        ctrl.submitted = false;
        ctrl.collectors = [];
        // public methods
        ctrl.submit = submitForm;
        // Request collecters
        collectorData.collectorsByType('ChatOps').then(processCollectorsResponse);
        function processCollectorsResponse(data) {
            console.log(data);
            ctrl.collectors = data;
        }
        /*
         * function submitForm(valid, url) { ctrl.submitted = true; if (valid &&
         * ctrl.collectors.length) {
         * createCollectorItem(url).then(processCollectorItemResponse); } }
         */
        function submitForm(valid, chatOpsOption, chatOpsRoomAuthToken, chatOpsServerUrl, chatOpsRoomName) {
            ctrl.submitted = true;
            if (valid && ctrl.collectors.length) {
                createCollectorItem(chatOpsOption, chatOpsRoomAuthToken, chatOpsServerUrl, chatOpsRoomName).then(
                    processCollectorItemResponse);
            }
        }
        function createCollectorItem(chatOpsOption, chatOpsRoomAuthToken, chatOpsServerUrl, chatOpsRoomName) {
            var item = {
                collectorId: _.findWhere(ctrl.collectors, {name: 'ChatOps'}).id,
                options: {
                    chatOpsOption: chatOpsOption,
                    chatOpsRoomAuthToken: chatOpsRoomAuthToken,
                    chatOpsServerUrl: chatOpsServerUrl,
                    chatOpsRoomName: chatOpsRoomName
                }
            };
            return collectorData.createCollectorItem(item);
        }
        function processCollectorItemResponse(response) {
            var postObj = {
                name: "ChatOps",
                options: {
                    id: widgetConfig.options.id,
                    chatOpsOption: ctrl.chatOpsOption,
                    chatOpsRoomName: ctrl.chatOpsRoomName,
                    chatOpsRoomAuthToken: ctrl.chatOpsRoomAuthToken,
                    chatOpsServerUrl: ctrl.chatOpsServerUrl
                },
                componentId: modalData.dashboard.application.components[0].id,
                collectorItemId: response.data.id
            };
            console.log("PostObject is:" + JSON.stringify(postObj));
            // pass this new config to the modal closing so it's saved
            $modalInstance.close(postObj);
        }
    }
...


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. It takes you right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run pytest-benchmark automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

