How to use reset_model method in Behave

Best Python code snippet using behave

test_base.py

Source:test_base.py Github

copy

Full Screen

...58 pt_outputs = [o.detach().numpy() for o in pt_outputs]59 for l, r in zip(ort_outputs, pt_outputs):60 assert np.allclose(l, r, atol=1e-4, rtol=1e-4, equal_nan=True)61 def test_conv_flatten_relu(self):62 reset_model(13)63 nps = [np.random.randn(1, 3, 224, 224).astype(np.float32)]64 inputs = Input(*nps)65 conv_node = Conv(inputs[0],66 np.random.randn(32, 3, 3, 3).astype(np.float32),67 np.random.randn(32).astype(np.float32))68 flatten_node = Flatten(conv_node)69 relu_node = Relu(flatten_node)70 Output(relu_node)71 self._run(list(zip(inputs, nps)))72 def test_conv_batchnorm_maxpool_flatten_add_relu(self):73 reset_model(13)74 nps = [np.random.randn(1, 3, 224, 224).astype(np.float32)]75 inputs = Input(*nps)76 conv_node = Conv(inputs[0],77 np.random.randn(32, 3, 3, 3).astype(np.float32),78 np.random.randn(32).astype(np.float32))79 bn_node = BatchNormalization(80 conv_node,81 np.ones(32,).astype(np.float32),82 np.zeros(32,).astype(np.float32),83 np.random.randn(32).astype(np.float32),84 np.abs(np.random.randn(32).astype(np.float32)),85 )86 max_pool_node = MaxPool(bn_node,87 kernel_shape=(3, 3),88 strides=(2, 2),89 pads=(0, 0, 1, 1))90 flatten_node = Flatten(max_pool_node, axis=1)91 add_node = Add(flatten_node, np.random.randn(1).astype(np.float32))92 relu_node = Relu(add_node)93 Output(relu_node)94 self._run(list(zip(inputs, nps)))95 def test_abs(self):96 reset_model(13)97 nps = [np.random.randn(1, 10).astype(np.float32)]98 inputs = Input(*nps)99 Output(Abs(inputs[0]))100 self._run(list(zip(inputs, nps)))101 def test_acos(self):102 reset_model(13)103 nps = [np.random.randn(5).astype(np.float32)]104 inputs = Input(*nps)105 Output(Acos(inputs[0]))106 self._run(list(zip(inputs, nps)))107 def test_acosh(self):108 reset_model(13)109 nps = [np.random.randn(5).astype(np.float32)]110 inputs = Input(*nps)111 Output(Acosh(inputs[0]))112 self._run(list(zip(inputs, nps)))113 def test_tanh(self):114 reset_model(13)115 nps = [np.random.randn(5).astype(np.float32)]116 inputs = 
Input(*nps)117 Output(Tanh(inputs[0]))118 self._run(list(zip(inputs, nps)))119 def test_add(self):120 reset_model(13)121 nps = [np.random.randn(1, 10).astype(np.float32)]122 inputs = Input(*nps)123 Output(Add(inputs[0], np.random.randn(1, 10).astype(np.float32)))124 self._run(list(zip(inputs, nps)))125 def test_sub(self):126 reset_model(13)127 nps = [np.random.randn(1, 10).astype(np.float32)]128 inputs = Input(*nps)129 Output(Sub(inputs[0], np.random.randn(1, 10).astype(np.float32)))130 self._run(list(zip(inputs, nps)))131 def test_and(self):132 reset_model(13)133 nps = [134 np.random.randint(low=0, high=1, size=(5,)).astype(bool),135 np.random.randint(low=0, high=1, size=(5,)).astype(bool)136 ]137 inputs = Input(*nps)138 Output(And(*inputs))139 self._run(list(zip(inputs, nps)))140 def test_argmax(self):141 reset_model(13)142 nps = [np.random.randn(1, 10).astype(np.float32)]143 inputs = Input(*nps)144 Output(ArgMax(inputs, axis=1))145 self._run(list(zip(inputs, nps)))146 def test_argmin(self):147 reset_model(13)148 nps = [np.random.randn(1, 10).astype(np.float32)]149 inputs = Input(*nps)150 Output(ArgMin(inputs, axis=1))151 self._run(list(zip(inputs, nps)))152 def test_asin(self):153 reset_model(13)154 nps = [np.random.randn(5).astype(np.float32)]155 inputs = Input(*nps)156 Output(Asin(inputs[0]))157 self._run(list(zip(inputs, nps)))158 def test_asinh(self):159 reset_model(13)160 nps = [np.random.randn(5).astype(np.float32)]161 inputs = Input(*nps)162 Output(Asinh(inputs[0]))163 self._run(list(zip(inputs, nps)))164 def test_atan(self):165 reset_model(13)166 nps = [np.random.randn(5).astype(np.float32)]167 inputs = Input(*nps)168 Output(Atan(inputs[0]))169 self._run(list(zip(inputs, nps)))170 def test_atanh(self):171 reset_model(13)172 nps = [np.random.randn(5).astype(np.float32)]173 inputs = Input(*nps)174 Output(Atanh(inputs[0]))175 self._run(list(zip(inputs, nps)))176 def test_avg_pool(self):177 reset_model(13)178 nps = [np.random.randn(1, 1, 5, 
5).astype(np.float32)]179 inputs = Input(*nps)180 Output(AveragePool(inputs, kernel_shape=(3, 3), pads=(0, 0, 1, 1)))181 self._run(list(zip(inputs, nps)))182 def test_avg_pool_no_pad(self):183 reset_model(13)184 nps = [np.random.randn(1, 1, 6, 6).astype(np.float32)]185 inputs = Input(*nps)186 Output(AveragePool(inputs, kernel_shape=(3, 3)))187 self._run(list(zip(inputs, nps)))188 def test_bitshift_left(self):189 reset_model(13)190 nps = [np.array([1, 2]).astype(np.uint8), np.array([1, 2]).astype(np.uint8)]191 inputs = Input(*nps)192 Output(BitShift(*inputs, direction="LEFT"))193 self._run(list(zip(inputs, nps)))194 def test_bitshift_right(self):195 reset_model(13)196 nps = [np.array([1, 4]).astype(np.uint8), np.array([1, 1]).astype(np.uint8)]197 inputs = Input(*nps)198 Output(BitShift(*inputs, direction="RIGHT"))199 self._run(list(zip(inputs, nps)))200 def test_batch_normalization(self):201 reset_model(13)202 nps = [np.random.randn(1, 32, 3, 3).astype(np.float32)]203 inputs = Input(*nps)204 Output(BatchNormalization(205 inputs[0],206 np.ones(32,).astype(np.float32),207 np.zeros(32,).astype(np.float32),208 np.random.randn(32).astype(np.float32),209 np.abs(np.random.randn(32).astype(np.float32)),210 ),211 output_num=1)212 self._run(list(zip(inputs, nps)))213 def test_cast(self):214 reset_model(13)215 nps = [216 np.random.randn(1, 10).astype(np.float32),217 ]218 inputs = Input(*nps)219 Output(Cast(inputs, to=6))220 self._run(list(zip(inputs, nps)))221 def test_ceil(self):222 reset_model(13)223 nps = [np.random.randn(1, 10).astype(np.float32)]224 inputs = Input(*nps)225 Output(Ceil(inputs[0]))226 self._run(list(zip(inputs, nps)))227 def test_clip(self):228 reset_model(13)229 nps = [230 np.random.randn(1, 5).astype(np.float32),231 np.asarray(0).astype(np.float32)232 ]233 inputs = Input(*nps)234 Output(Clip(*inputs))235 self._run(list(zip(inputs, nps)))236 def test_concat(self):237 reset_model(13)238 nps = [239 np.random.randn(1, 10).astype(np.float32),240 
np.random.randn(2, 10).astype(np.float32)241 ]242 inputs = Input(*nps)243 Output(Concat(inputs, axis=0))244 self._run(list(zip(inputs, nps)))245 def test_constant_add(self):246 reset_model(13)247 nps = [248 np.random.randn(1, 10).astype(np.float32),249 ]250 inputs = Input(*nps)251 constant_value = np.random.randn(1, 10).astype(np.float32)252 t = make_tensor("", 1, constant_value.shape, constant_value.flatten())253 Output(Add(inputs, Constant(value=t)))254 self._run(list(zip(inputs, nps)))255 def test_constant_of_shape(self):256 reset_model(13)257 nps = [258 np.array([2, 3]).astype(np.int64),259 ]260 inputs = Input(*nps)261 constant_value = np.random.randn(1).astype(np.float32)262 t = make_tensor("", 1, constant_value.shape, constant_value)263 Output(ConstantOfShape(inputs, value=t))264 self._run(list(zip(inputs, nps)))265 def test_conv(self):266 reset_model(13)267 nps = [np.random.randn(1, 3, 3, 3).astype(np.float32)]268 inputs = Input(*nps)269 Output(270 Conv(271 inputs[0],272 np.random.randn(32, 3, 3, 3).astype(np.float32),273 np.random.randn(32).astype(np.float32),274 ))275 self._run(list(zip(inputs, nps)))276 def test_conv_with_autopad_same(self):277 reset_model(13)278 x = np.array([[[279 [0., 1., 2., 3., 4.], # (1, 1, 5, 5) input tensor280 [5., 6., 7., 8., 9.],281 [10., 11., 12., 13., 14.],282 [15., 16., 17., 18., 19.],283 [20., 21., 22., 23., 24.]284 ]]]).astype(np.float32)285 W = np.array([[[286 [1., 1., 1.], # (1, 1, 3, 3) tensor for convolution weights287 [1., 1., 1.],288 [1., 1., 1.]289 ]]]).astype(np.float32)290 nps = [x]291 inputs = Input(*nps)292 Output(293 Conv(294 inputs[0],295 W,296 auto_pad='SAME_LOWER',297 kernel_shape=[3, 3],298 strides=[2, 2],299 ))300 self._run(list(zip(inputs, nps)))301 def test_conv_transpose(self):302 reset_model(13)303 nps = [np.random.randn(1, 3, 3, 3).astype(np.float32)]304 inputs = Input(*nps)305 Output(306 ConvTranspose(307 inputs[0],308 np.random.randn(3, 32, 3, 3).astype(np.float32),309 
np.random.randn(32).astype(np.float32),310 ))311 self._run(list(zip(inputs, nps)))312 def test_conv_transpose_pads(self):313 reset_model(13)314 nps = [np.random.randn(1, 1, 3, 3).astype(np.float32)]315 inputs = Input(*nps)316 Output(317 ConvTranspose(inputs[0],318 np.random.randn(1, 2, 3, 3).astype(np.float32),319 strides=[3, 2],320 pads=[1, 2, 1, 2]))321 self._run(list(zip(inputs, nps)))322 def test_conv_transpose_dilations(self):323 reset_model(13)324 nps = [np.random.randn(1, 1, 3, 3).astype(np.float32)]325 inputs = Input(*nps)326 Output(327 ConvTranspose(328 inputs[0],329 np.random.randn(1, 1, 2, 2).astype(np.float32),330 dilations=[2, 2],331 ))332 self._run(list(zip(inputs, nps)))333 def test_conv_transpose_attributes(self):334 reset_model(13)335 nps = [np.random.randn(1, 1, 3, 3).astype(np.float32)]336 inputs = Input(*nps)337 Output(338 ConvTranspose(339 inputs[0],340 np.random.randn(1, 2, 3, 3).astype(np.float32),341 strides=[3, 2],342 output_shape=[10, 8],343 output_padding=[1, 1],344 ))345 self._run(list(zip(inputs, nps)))346 def test_cos(self):347 reset_model(13)348 nps = [np.random.randn(5).astype(np.float32)]349 inputs = Input(*nps)350 Output(Cos(inputs[0]))351 self._run(list(zip(inputs, nps)))352 def test_cosh(self):353 reset_model(13)354 nps = [np.random.randn(5).astype(np.float32)]355 inputs = Input(*nps)356 Output(Cosh(inputs[0]))357 self._run(list(zip(inputs, nps)))358 def test_flatten(self):359 reset_model(13)360 nps = [np.random.randn(1, 3, 3, 3).astype(np.float32)]361 inputs = Input(*nps)362 Output(Flatten(inputs))363 self._run(list(zip(inputs, nps)))364 def test_gather(self):365 reset_model(13)366 nps = [367 np.random.randn(6, 8, 3).astype(np.float32),368 np.random.randn(*[3, 2]).astype(np.int64),369 ]370 inputs = Input(*nps)371 Output(Gather(*inputs, axis=0))372 self._run(list(zip(inputs, nps)))373 def test_gather_axis_1(self):374 reset_model(13)375 nps = [376 np.random.randn(6, 3, 3).astype(np.float32),377 np.array([[0, 2], [0, 1], [2, 
0]]).astype(np.int64),378 ]379 inputs = Input(*nps)380 Output(Gather(*inputs, axis=1))381 self._run(list(zip(inputs, nps)))382 def test_gather_nd(self):383 reset_model(13)384 nps = [385 np.random.randn(6, 8, 3).astype(np.float32),386 np.random.randn(*[3, 2]).astype(np.int64),387 ]388 inputs = Input(*nps)389 Output(GatherND(*inputs, batch_dims=0))390 self._run(list(zip(inputs, nps)))391 @pytest.mark.skip(reason="Not implemented for batch_dims != 0")392 def test_gather_nd_batch_dims_1(self):393 reset_model(13)394 nps = [395 np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),396 np.array([[1], [0]]).astype(np.int64),397 ]398 inputs = Input(*nps)399 Output(GatherND(*inputs, batch_dims=1))400 self._run(list(zip(inputs, nps)))401 def test_gemm(self):402 reset_model(13)403 nps = [404 np.random.randn(2, 3).astype(np.float32),405 np.random.randn(3, 4).astype(np.float32),406 np.random.randn(2, 4).astype(np.float32),407 ]408 inputs = Input(*nps)409 Output(Gemm(*inputs))410 self._run(list(zip(inputs, nps)))411 def test_global_average_pool(self):412 reset_model(13)413 nps = [np.random.randn(2, 3, 4).astype(np.float32)]414 inputs = Input(*nps)415 Output(GlobalAveragePool(*inputs))416 self._run(list(zip(inputs, nps)))417 def test_mat_mul(self):418 reset_model(13)419 nps = [420 np.random.randn(5, 2, 3).astype(np.float32),421 np.random.randn(5, 3, 2).astype(np.float32)422 ]423 inputs = Input(*nps)424 Output(MatMul(*inputs))425 self._run(list(zip(inputs, nps)))426 def test_max(self):427 reset_model(13)428 nps = [429 np.random.randn(1, 10).astype(np.float32),430 np.random.randn(2, 10).astype(np.float32)431 ]432 inputs = Input(*nps)433 Output(Max(inputs))434 self._run(list(zip(inputs, nps)))435 def test_max_pool(self):436 reset_model(13)437 nps = [np.random.randn(1, 1, 5, 5).astype(np.float32)]438 inputs = Input(*nps)439 Output(MaxPool(inputs, kernel_shape=(3, 3), pads=(0, 0, 1, 1)),440 output_num=1)441 self._run(list(zip(inputs, nps)))442 def test_max_pool_pads(self):443 
reset_model(13)444 nps = [np.random.randn(1, 1, 4, 4).astype(np.float32)]445 inputs = Input(*nps)446 Output(MaxPool(inputs, kernel_shape=(3, 3), pads=(1, 1, 1, 1)),447 output_num=1)448 self._run(list(zip(inputs, nps)))449 def test_mul(self):450 reset_model(13)451 nps = [452 np.random.randn(1, 2, 3).astype(np.float32),453 np.random.randn(1, 2, 3).astype(np.float32)454 ]455 inputs = Input(*nps)456 Output(Mul(*inputs))457 self._run(list(zip(inputs, nps)))458 def test_non_max_suppression_center_point_box_format(self):459 reset_model(13)460 boxes = np.array([[[0.5, 0.5, 1.0, 1.0], [0.5, 0.6, 1.0, 1.0],461 [0.5, 0.4, 1.0, 1.0], [0.5, 10.5, 1.0, 1.0],462 [0.5, 10.6, 1.0, 1.0], [0.5, 100.5, 1.0,463 1.0]]]).astype(np.float32)464 scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)465 max_output_boxes_per_class = np.array([3]).astype(np.int64)466 iou_threshold = np.array([0.5]).astype(np.float32)467 score_threshold = np.array([0.0]).astype(np.float32)468 selected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0,469 5]]).astype(np.int64)470 inputs = Input(*[boxes, scores])471 Output(NonMaxSuppression(*inputs, max_output_boxes_per_class,472 iou_threshold))473 self._run(list(zip(inputs, [boxes, scores])))474 def test_non_max_suppression_flipped_coordinates(self):475 reset_model(13)476 boxes = np.array([[[1.0, 1.0, 0.0, 0.0], [0.0, 0.1, 1.0, 1.1],477 [0.0, 0.9, 1.0, -0.1], [0.0, 10.0, 1.0, 11.0],478 [1.0, 10.1, 0.0, 11.1], [1.0, 101.0, 0.0,479 100.0]]]).astype(np.float32)480 scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)481 max_output_boxes_per_class = np.array([5]).astype(np.int64)482 iou_threshold = np.array([0.5]).astype(np.float32)483 score_threshold = np.array([0.0]).astype(np.float32)484 selected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0,485 5]]).astype(np.int64)486 inputs = Input(*[boxes, scores])487 Output(NonMaxSuppression(*inputs, max_output_boxes_per_class,488 iou_threshold))489 self._run(list(zip(inputs, [boxes, 
scores])))490 def test_non_max_suppression_identical_boxes(self):491 reset_model(13)492 boxes = np.array([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0],493 [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0],494 [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0],495 [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0],496 [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0,497 1.0]]]).astype(np.float32)498 scores = np.array([[[0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9,499 0.9]]]).astype(np.float32)500 max_output_boxes_per_class = np.array([3]).astype(np.int64)501 iou_threshold = np.array([0.5]).astype(np.float32)502 score_threshold = np.array([0.0]).astype(np.float32)503 selected_indices = np.array([[0, 0, 0]]).astype(np.int64)504 inputs = Input(*[boxes, scores])505 Output(NonMaxSuppression(*inputs, max_output_boxes_per_class,506 iou_threshold))507 self._run(list(zip(inputs, [boxes, scores])))508 def test_non_max_suppression_limit_output_size(self):509 reset_model(13)510 boxes = np.array([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1],511 [0.0, -0.1, 1.0, 0.9], [0.0, 10.0, 1.0, 11.0],512 [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0,513 101.0]]]).astype(np.float32)514 scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)515 max_output_boxes_per_class = np.array([2]).astype(np.int64)516 iou_threshold = np.array([0.5]).astype(np.float32)517 score_threshold = np.array([0.0]).astype(np.float32)518 selected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)519 inputs = Input(*[boxes, scores])520 Output(NonMaxSuppression(*inputs, max_output_boxes_per_class,521 iou_threshold))522 self._run(list(zip(inputs, [boxes, scores])))523 def test_non_max_suppression_single_box(self):524 reset_model(13)525 boxes = np.array([[[0.0, 0.0, 1.0, 1.0]]]).astype(np.float32)526 scores = np.array([[[0.9]]]).astype(np.float32)527 max_output_boxes_per_class = np.array([3]).astype(np.int64)528 iou_threshold = np.array([0.5]).astype(np.float32)529 score_threshold = np.array([0.0]).astype(np.float32)530 inputs = 
Input(*[boxes, scores])531 Output(NonMaxSuppression(*inputs, max_output_boxes_per_class,532 iou_threshold))533 self._run(list(zip(inputs, [boxes, scores])))534 def test_non_max_suppression_suppress_by_IOU(self):535 reset_model(13)536 boxes = np.array([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1],537 [0.0, -0.1, 1.0, 0.9], [0.0, 10.0, 1.0, 11.0],538 [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0,539 101.0]]]).astype(np.float32)540 scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)541 max_output_boxes_per_class = np.array([3]).astype(np.int64)542 iou_threshold = np.array([0.5]).astype(np.float32)543 score_threshold = np.array([0.0]).astype(np.float32)544 selected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0,545 5]]).astype(np.int64)546 inputs = Input(*[boxes, scores])547 Output(NonMaxSuppression(*inputs, max_output_boxes_per_class,548 iou_threshold))549 self._run(list(zip(inputs, [boxes, scores])))550 def test_non_max_suppression_suppress_by_IOU_and_scores(self):551 reset_model(13)552 boxes = np.array([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1],553 [0.0, -0.1, 1.0, 0.9], [0.0, 10.0, 1.0, 11.0],554 [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0,555 101.0]]]).astype(np.float32)556 scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)557 max_output_boxes_per_class = np.array([3]).astype(np.int64)558 iou_threshold = np.array([0.5]).astype(np.float32)559 score_threshold = np.array([0.4]).astype(np.float32)560 selected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)561 inputs = Input(*[boxes, scores])562 Output(563 NonMaxSuppression(*inputs, max_output_boxes_per_class, iou_threshold,564 score_threshold))565 self._run(list(zip(inputs, [boxes, scores])))566 def test_non_max_suppression_two_batches(self):567 reset_model(13)568 boxes = np.array([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1],569 [0.0, -0.1, 1.0, 0.9], [0.0, 10.0, 1.0, 11.0],570 [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]],571 [[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 
1.0, 1.1],572 [0.0, -0.1, 1.0, 0.9], [0.0, 10.0, 1.0, 11.0],573 [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0,574 101.0]]]).astype(np.float32)575 scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]],576 [[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)577 max_output_boxes_per_class = np.array([2]).astype(np.int64)578 iou_threshold = np.array([0.5]).astype(np.float32)579 score_threshold = np.array([0.0]).astype(np.float32)580 selected_indices = np.array([[0, 0, 3], [0, 0, 0], [1, 0, 3],581 [1, 0, 0]]).astype(np.int64)582 inputs = Input(*[boxes, scores])583 Output(NonMaxSuppression(*inputs, max_output_boxes_per_class,584 iou_threshold))585 self._run(list(zip(inputs, [boxes, scores])))586 def test_non_max_suppression_two_classes(self):587 reset_model(13)588 boxes = np.array([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1],589 [0.0, -0.1, 1.0, 0.9], [0.0, 10.0, 1.0, 11.0],590 [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0,591 101.0]]]).astype(np.float32)592 scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3],593 [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)594 max_output_boxes_per_class = np.array([2]).astype(np.int64)595 iou_threshold = np.array([0.5]).astype(np.float32)596 score_threshold = np.array([0.0]).astype(np.float32)597 selected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 1, 3],598 [0, 1, 0]]).astype(np.int64)599 inputs = Input(*[boxes, scores])600 Output(NonMaxSuppression(*inputs, max_output_boxes_per_class,601 iou_threshold))602 self._run(list(zip(inputs, [boxes, scores])))603 def test_non_zero(self):604 reset_model(13)605 nps = [np.array([[1, 0], [1, 1]], dtype=bool)]606 inputs = Input(*nps)607 Output(NonZero(*inputs))608 self._run(list(zip(inputs, nps)))609 def test_pad(self):610 reset_model(13)611 nps = [612 np.random.randn(1, 2, 3).astype(np.float32),613 ]614 inputs = Input(*nps)615 Output(Pad(*inputs, np.array([0, 0, 1, 0, 0, 2])))616 self._run(list(zip(inputs, nps)))617 def test_pad_5D(self):618 reset_model(13)619 nps = [620 np.random.randn(1, 2, 
3, 4, 5).astype(np.float32),621 ]622 inputs = Input(*nps)623 Output(624 Pad(*inputs, np.array([0, 0, 1, 2, 3, 0, 0, 2, 3, 4]),625 np.array([1.0]).astype(np.float32)))626 self._run(list(zip(inputs, nps)))627 def test_pad_reflect(self):628 reset_model(13)629 nps = [630 np.random.randn(1, 2, 3, 4).astype(np.float32),631 ]632 inputs = Input(*nps)633 Output(634 Pad(*inputs,635 np.array([0, 0, 1, 2, 0, 0, 2, 3]),636 np.array([1.0]).astype(np.float32),637 mode="reflect"))638 self._run(list(zip(inputs, nps)))639 def test_reciprocal(self):640 reset_model(13)641 nps = [np.random.randn(1, 10).astype(np.float32)]642 inputs = Input(*nps)643 Output(Reciprocal(inputs[0]))644 self._run(list(zip(inputs, nps)))645 def test_reduce_prod(self):646 reset_model(13)647 nps = [np.random.randn(1, 2, 3).astype(np.float32)]648 inputs = Input(*nps)649 Output(ReduceProd(inputs[0], axes=np.array((1, 2)).astype(np.int64)))650 self._run(list(zip(inputs, nps)))651 def test_reduce_sum(self):652 reset_model(13)653 nps = [np.random.randn(1, 2, 3).astype(np.float32)]654 inputs = Input(*nps)655 Output(ReduceSum(inputs[0], np.array((1, 2)).astype(np.int64)))656 self._run(list(zip(inputs, nps)))657 def test_relu(self):658 reset_model(13)659 nps = [np.random.randn(1, 5).astype(np.float32)]660 inputs = Input(*nps)661 Output(Relu(inputs))662 self._run(list(zip(inputs, nps)))663 def test_elu(self):664 reset_model(13)665 nps = [np.random.randn(1, 5).astype(np.float32)]666 inputs = Input(*nps)667 Output(Elu(inputs))668 self._run(list(zip(inputs, nps)))669 def test_reshape(self):670 reset_model(13)671 nps = [np.random.randn(4,).astype(np.float32)]672 inputs = Input(*nps)673 Output(Reshape(inputs[0], np.array((2, 2)).astype(np.int64)))674 self._run(list(zip(inputs, nps)))675 def test_resize_scales_nearest(self):676 reset_model(13)677 nps = [678 np.array([[[679 [1, 2, 3, 4],680 [5, 6, 7, 8],681 ]]], dtype=np.float32),682 np.array([], dtype=np.float32),683 ]684 inputs = Input(*nps)685 Output(686 Resize(687 
*inputs,688 np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32),689 mode="nearest",690 ))691 self._run(list(zip(inputs, nps)))692 def test_resize_downsample_sizes_linear_pytorch_half_pixel(self):693 reset_model(13)694 nps = [695 np.array([[[696 [1, 2, 3, 4],697 [5, 6, 7, 8],698 [9, 10, 11, 12],699 [13, 14, 15, 16],700 ]]],701 dtype=np.float32),702 np.array([], dtype=np.float32),703 np.array([], dtype=np.float32),704 ]705 inputs = Input(*nps)706 Output(707 Resize(708 *inputs,709 np.array([1, 1, 3, 1], dtype=np.int64),710 mode='linear',711 # Changed from pytorch_half_pixel(1.8.1) to half_pixel(1.9.0) due to712 # https://github.com/pytorch/pytorch/issues/62237713 coordinate_transformation_mode='half_pixel'))714 self._run(list(zip(inputs, nps)))715 def test_resize_pt_nearest(self):716 reset_model(13)717 nps = [718 np.array([[[[1., 2., 0.], [3., 4., 0.], [0., 0., 0.]]]],719 dtype=np.float32),720 np.array([], dtype=np.float32),721 ]722 inputs = Input(*nps)723 Output(724 Resize(725 *inputs,726 np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32),727 mode="nearest",728 ))729 self._run(list(zip(inputs, nps)))730 def test_resize_pt_bilinear(self):731 reset_model(13)732 nps = [733 np.array([[[[1., 2., 0.], [3., 4., 0.], [0., 0., 0.]]]],734 dtype=np.float32),735 np.array([], dtype=np.float32),736 ]737 inputs = Input(*nps)738 Output(739 Resize(740 *inputs,741 np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32),742 mode="linear",743 ))744 self._run(list(zip(inputs, nps)))745 def test_resize_pt_bilinear_align_corners(self):746 reset_model(13)747 nps = [748 np.array([[[[1., 2., 0.], [3., 4., 0.], [0., 0., 0.]]]],749 dtype=np.float32),750 np.array([], dtype=np.float32),751 ]752 inputs = Input(*nps)753 Output(754 Resize(755 *inputs,756 np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32),757 mode="linear",758 coordinate_transformation_mode="align_corners",759 ))760 self._run(list(zip(inputs, nps)))761 def test_roi_align(self):762 reset_model(13)763 nps = [764 np.array(765 [[[766 [767 0.2764,768 
0.7150,769 0.1958,770 0.3416,771 0.4638,772 0.0259,773 0.2963,774 0.6518,775 0.4856,776 0.7250,777 ],778 [779 0.9637,780 0.0895,781 0.2919,782 0.6753,783 0.0234,784 0.6132,785 0.8085,786 0.5324,787 0.8992,788 0.4467,789 ],790 [791 0.3265,792 0.8479,793 0.9698,794 0.2471,795 0.9336,796 0.1878,797 0.4766,798 0.4308,799 0.3400,800 0.2162,801 ],802 [803 0.0206,804 0.1720,805 0.2155,806 0.4394,807 0.0653,808 0.3406,809 0.7724,810 0.3921,811 0.2541,812 0.5799,813 ],814 [815 0.4062,816 0.2194,817 0.4473,818 0.4687,819 0.7109,820 0.9327,821 0.9815,822 0.6320,823 0.1728,824 0.6119,825 ],826 [827 0.3097,828 0.1283,829 0.4984,830 0.5068,831 0.4279,832 0.0173,833 0.4388,834 0.0430,835 0.4671,836 0.7119,837 ],838 [839 0.1011,840 0.8477,841 0.4726,842 0.1777,843 0.9923,844 0.4042,845 0.1869,846 0.7795,847 0.9946,848 0.9689,849 ],850 [851 0.1366,852 0.3671,853 0.7011,854 0.6234,855 0.9867,856 0.5585,857 0.6985,858 0.5609,859 0.8788,860 0.9928,861 ],862 [863 0.5697,864 0.8511,865 0.6711,866 0.9406,867 0.8751,868 0.7496,869 0.1650,870 0.1049,871 0.1559,872 0.2514,873 ],874 [875 0.7012,876 0.4056,877 0.7879,878 0.3461,879 0.0415,880 0.2998,881 0.5094,882 0.3727,883 0.5482,884 0.0502,885 ],886 ]]],887 dtype=np.float32,888 )889 ]890 inputs = Input(*nps)891 Output(892 RoiAlign(893 inputs[0],894 np.array([[0, 0, 9, 9], [0, 5, 4, 9], [5, 5, 9, 9]],895 dtype=np.float32),896 np.array([0, 0, 0], dtype=np.int64),897 spatial_scale=1.0,898 output_height=5,899 output_width=5,900 sampling_ratio=2,901 ))902 self._run(list(zip(inputs, nps)))903 def test_scatter_elements_with_axis(self):904 reset_model(13)905 nps = [np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)]906 inputs = Input(*nps)907 Output(908 ScatterElements(inputs[0],909 np.array([[1, 3]], dtype=np.int64),910 np.array([[1.1, 2.1]], dtype=np.float32),911 axis=1))912 self._run(list(zip(inputs, nps)))913 def test_scatter_elements_without_axis(self):914 reset_model(13)915 nps = [np.zeros((3, 3), dtype=np.float32)]916 inputs = 
Input(*nps)917 Output(918 ScatterElements(919 inputs[0], np.array([[1, 0, 2], [0, 2, 1]], dtype=np.int64),920 np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)))921 self._run(list(zip(inputs, nps)))922 def test_scatter_elements_with_negative_axis(self):923 reset_model(13)924 nps = [np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)]925 inputs = Input(*nps)926 Output(927 ScatterElements(inputs[0],928 np.array([[1, 3]], dtype=np.int64),929 np.array([[1.1, 2.1]], dtype=np.float32),930 axis=-1))931 self._run(list(zip(inputs, nps)))932 def test_scatter_elements(self):933 reset_model(13)934 nps = [np.zeros(shape=(10, 7, 7), dtype=np.float32)]935 inputs = Input(*nps)936 Output(937 ScatterElements(inputs[0],938 np.random.randint(low=0, high=5, size=(5, 7, 7)),939 np.random.randn(5, 7, 7).astype(np.float32),940 axis=0))941 self._run(list(zip(inputs, nps)))942 def test_shape(self):943 reset_model(13)944 nps = [np.random.randn(1, 2, 3, 4).astype(np.float32)]945 inputs = Input(*nps)946 Output(Shape(inputs[0]))947 self._run(list(zip(inputs, nps)))948 def test_sigmoid(self):949 reset_model(13)950 nps = [np.random.randn(1, 10).astype(np.float32)]951 inputs = Input(*nps)952 Output(Sigmoid(inputs[0]))953 self._run(list(zip(inputs, nps)))954 def test_slice(self):955 reset_model(13)956 nps = [np.random.randn(5, 5).astype(np.float32)]957 inputs = Input(*nps)958 Output(959 Slice(inputs[0],960 np.array((2, 3)).astype(np.int64),961 np.array((4, 5)).astype(np.int64)))962 self._run(list(zip(inputs, nps)))963 def test_softmax(self):964 reset_model(13)965 nps = [np.random.randn(1, 10).astype(np.float32)]966 inputs = Input(*nps)967 Output(Softmax(inputs[0]))968 self._run(list(zip(inputs, nps)))969 def test_split(self):970 reset_model(13)971 nps = [np.random.randn(1, 10).astype(np.float32)]972 inputs = Input(*nps)973 Output(Split(inputs, split=np.array([2, 8]), axis=1))974 self._run(list(zip(inputs, nps)))975 def test_sqrt(self):976 reset_model(13)977 nps = 
[np.random.randn(1, 10).astype(np.float32)]978 inputs = Input(*nps)979 Output(Sqrt(inputs[0]))980 self._run(list(zip(inputs, nps)))981 def test_squeeze(self):982 reset_model(13)983 nps = [np.random.randn(1, 10, 1, 1).astype(np.float32)]984 inputs = Input(*nps)985 Output(Squeeze(inputs[0], np.array(([2, 3]))))986 self._run(list(zip(inputs, nps)))987 def test_squeeze_no_axes(self):988 reset_model(13)989 nps = [np.random.randn(1, 10, 1, 1).astype(np.float32)]990 inputs = Input(*nps)991 Output(Squeeze(inputs[0]))992 self._run(list(zip(inputs, nps)))993 def test_topk(self):994 reset_model(13)995 nps = [np.random.randn(1, 2, 3, 4).astype(np.float32)]996 inputs = Input(*nps)997 Output(TopK(inputs[0], np.asarray([3])))998 self._run(list(zip(inputs, nps)))999 def test_topk_attrs(self):1000 reset_model(13)1001 nps = [np.random.randn(1, 1, 5, 1).astype(np.float32)]1002 inputs = Input(*nps)1003 Output(TopK(inputs[0], np.asarray([3]), axis=2, largest=0, sorted=1))1004 self._run(list(zip(inputs, nps)))1005 def test_transpose(self):1006 reset_model(13)1007 nps = [np.random.randn(1, 2, 3, 4).astype(np.float32)]1008 inputs = Input(*nps)1009 Output(Transpose(inputs[0], perm=[0, 2, 3, 1]))1010 self._run(list(zip(inputs, nps)))1011 def test_transpose_no_perm(self):1012 reset_model(13)1013 nps = [np.random.randn(1, 2, 3, 4).astype(np.float32)]1014 inputs = Input(*nps)1015 Output(Transpose(inputs[0]))1016 self._run(list(zip(inputs, nps)))1017 def test_unsqueeze(self):1018 reset_model(13)1019 nps = [np.random.randn(1, 2).astype(np.float32)]1020 inputs = Input(*nps)1021 Output(Unsqueeze(inputs[0], np.array(([2, 3]))))1022 self._run(list(zip(inputs, nps)))1023if __name__ == '__main__':...

Full Screen

Full Screen

model.py

Source:model.py Github

copy

Full Screen

import numpy as np


def map_values(data, mapping):
    """Map every element of a numpy array through a mapping dictionary."""
    return np.vectorize(mapping.get)(data)


def sign(x):
    """Like np.sign, but a 0 result is replaced by a random choice of -1 or 1."""
    x_out = np.sign(x)
    if x_out == 0:
        x_out = np.random.choice([-1, 1])
    return x_out


# Load the word dictionary plus training and test data; build a row->word map.
directory = 'data/'
files = [directory + file for file in ['pa3dictionary.txt', 'pa3test.txt', 'pa3train.txt']]
row_to_word = dict(enumerate(list(np.loadtxt(files[0], dtype='str'))))
train = np.loadtxt(files[1], dtype='int', delimiter=' ')
test = np.loadtxt(files[2], dtype='int', delimiter=' ')
num_features = train.shape[1] - 1

# Map the 2 / 1 labels to -1 / 1.
mapping = {2: -1, 1: 1}
train[:, -1] = map_values(train[:, -1], mapping)
test[:, -1] = map_values(test[:, -1], mapping)


class SimplePerceptron:
    """Classic perceptron with an explicit bias term appended to each sample.

    labeled_data arguments are 2d arrays with features as columns and samples
    as rows; the final column is the label and must be -1 or 1.
    """

    def __init__(self):
        self.num_features = None
        self.w = None  # weight vector of length num_features + 1 (bias last)

    def fit(self, labeled_data, reset_model=True):
        # Fit the model to labeled_data. When reset_model is True the weight
        # vector is re-initialised to zeros; otherwise training continues
        # from the current weights (another pass over the data).
        X = labeled_data[:, :-1]
        y = labeled_data[:, -1]
        if reset_model:
            # BUG FIX: ndarray.size is a scalar element count and is not
            # subscriptable; the feature count is X.shape[1].
            self.num_features = X.shape[1]
            self.w = np.zeros(self.num_features + 1)
        for i in range(y.size):
            x = np.append(X[i], [1])  # append 1 to x for the bias term
            yhat = self.predict(x)  # estimate class using current weights
            if y[i] != yhat:  # incorrect decision -> update weights
                self.w = np.add(self.w, np.multiply(y[i], x))

    def predict(self, x):
        # Classify one (bias-augmented) sample with the current weights.
        return sign(np.dot(x, self.w))

    def test(self, labeled_data):
        # Return the error rate of the current model on labeled_data.
        X = labeled_data[:, :-1]
        y = labeled_data[:, -1]
        num_errors = 0
        for i in range(y.size):
            x = np.append(X[i], [1])  # append 1 to x for the bias term
            yhat = self.predict(x)
            if y[i] != yhat:
                num_errors += 1
        return num_errors / y.size


# Train the model over 5 passes of the training data and report training and
# test error after each pass.
simple_perceptron = SimplePerceptron()
print('## Simple Perceptron ##')
num_passes = 5
for i in range(num_passes):
    print('# Pass ' + str(i+1) + ' #')
    simple_perceptron.fit(train, reset_model=(i == 0))  # reset only on pass 0
    print('Training error: ' + '{:.4f}'.format(simple_perceptron.test(train)))
    print('Testing error: ' + '{:.4f}'.format(simple_perceptron.test(test)))
    print('')
print('')
print('')


class KernelisedPerceptron:
    """Kernel (dual-form) perceptron: one alpha coefficient per training sample.

    labeled_data arguments are 2d arrays with features as columns and samples
    as rows; the final column is the label and must be -1 or 1.
    """

    def __init__(self):
        self.num_samples = None
        self.num_features = None
        self.alpha = None  # dual coefficients, one per training sample
        self.X_trained = None  # training samples kept for kernel evaluation
        self.kernel = None

    def fit(self, labeled_data, reset_model=True, kernel=np.dot):
        # Fit the model to labeled_data using the given kernel. When
        # reset_model is True the training set and alphas are re-initialised;
        # otherwise training continues (another pass over the same data).
        X = labeled_data[:, :-1]
        y = labeled_data[:, -1]
        if reset_model:
            self.kernel = kernel
            self.X_trained = X
            self.num_samples, self.num_features = X.shape
            self.alpha = np.zeros(self.num_samples)
        for j in range(self.num_samples):
            yhat = self.predict(X[j, :])
            if y[j] != yhat:
                self.alpha[j] += y[j]

    def predict(self, x):
        # Classify sample x: sign of the alpha-weighted kernel sum over the
        # stored training samples.
        yhat = 0
        for i in range(self.num_samples):
            yhat += self.alpha[i] * self.kernel(self.X_trained[i, :], x)
        return sign(yhat)

    def test(self, labeled_data):
        # Return the error rate of the current model on labeled_data.
        num_errors = 0
        X = labeled_data[:, :-1]
        y = labeled_data[:, -1]
        # BUG FIX: iterate over the rows of labeled_data (y.size), not over
        # self.num_samples (the training-set size) — the two differ whenever
        # the evaluated set is not the training set, which crashed (smaller
        # set) or mis-counted (larger set) and divided by the wrong total.
        for j in range(y.size):
            yhat = self.predict(X[j, :])
            if y[j] != yhat:
                num_errors += 1
        return num_errors / y.size


# Kernel functions as defined in the exercise.
def kernel_exp(x1, x2):
    return np.exp(-np.linalg.norm(x1-x2) / 20)


def kernel_poly(x1, x2):
    return (np.dot(x1, x2) + 10) ** 2


kernel_perceptron = KernelisedPerceptron()
kernels = [np.dot, kernel_exp, kernel_poly]
messages = ['## Kernalised - dot product ##',
            '## Kernalised - exponential ##',
            '## Kernalised - polynomial ##']
# For each kernel, train the model over 5 passes of the training data and
# report training and test error after each pass.
num_passes = 5
for message, kernel in zip(messages, kernels):
    print(message)
    for i in range(num_passes):
        print('# Pass ' + str(i+1) + ' #')
        kernel_perceptron.fit(train, reset_model=(i == 0), kernel=kernel)  # reset only on pass 0
        print('Training error: ' + '{:.5f}'.format(kernel_perceptron.test(train)))
        print('Testing error: ' + '{:.5f}'.format(kernel_perceptron.test(test)))
        print('')
    print('')

Full Screen

Full Screen

Find_Best_Learning_Rate.py

Source:Find_Best_Learning_Rate.py Github

copy

Full Screen

1import os2from tensorflow.python.keras.optimizers import Adam3from tensorflow.python.keras.backend import get_session4def reset_model(model):5 print('Reset model')6 session = get_session()7 for i, layer in enumerate(model.layers):8 if hasattr(layer, 'kernel_initializer'):9 model.layers[i].kernel.initializer.run(session=session)10 return model11class Find_Best_Learning_Rate(object):12 def __init__(self, train_generator=None, validation_generator=None, Model_val=None, epochs=2, learning_rate=0, upper_bound=1, scale=1.1,reset_model=False,13 out_path=os.path.join('.','Learning_rates'),metrics=['accuracy'], optimizer=Adam, loss='categorical_crossentropy',num_workers=10, steps_per_epoch=None):14 self.steps_per_epoch = self.steps_per_epoch15 if steps_per_epoch is None:16 self.steps_per_epoch = len(train_generator)17 self.num_workers = num_workers18 self.reset_model = reset_model19 self.loss = loss20 self.train_generator = train_generator21 self.validation_generator = validation_generator22 self.epochs = epochs23 self.Model_val = Model_val24 self.out_path = out_path25 self.metrics = metrics26 self.optimizer = optimizer27 if not os.path.exists(out_path):28 os.makedirs(out_path)29 self.run_over_learning_rates(learning_rate=learning_rate, upper_bound=upper_bound, scale=scale)30 def run_for_learning_rate(self, learning_rate):31 out_path = os.path.join(self.out_path, str(learning_rate) + '.txt')32 if os.path.exists(out_path):33 return None # Do not redo a run34 if self.reset_model:35 self.Model_val = reset_model(self.Model_val)36 optimizer = self.optimizer(lr=learning_rate)37 self.Model_val.compile(optimizer, loss=self.loss, metrics=self.metrics)38 history = self.Model_val.fit_generator(generator=self.train_generator, workers=self.num_workers, use_multiprocessing=False,39 max_queue_size=200,shuffle=True, epochs=self.epochs, initial_epoch=0,40 validation_data=self.validation_generator, steps_per_epoch=self.steps_per_epoch)41 fid = open(out_path,'w+')42 for key in 
history.history.keys():43 if key.find('val') == 0: # Only get the validation data44 fid.write(key + ',')45 fid.write(str(history.history[key])+'\n')46 fid.close()47 return None48 def run_over_learning_rates(self, learning_rate, upper_bound, scale):49 while learning_rate < upper_bound:...

Full Screen

Full Screen

test_sentiment_classifier.py

Source:test_sentiment_classifier.py Github

copy

Full Screen

...15 pos['training']16 ])17 c = 2 ** 718 classifier.c = c19 classifier.reset_model()20 n_er = self.validate(classifier, neg['validation'], 'negative')21 p_er = self.validate(classifier, pos['validation'], 'positive')22 total = Fraction(n_er.numerator + p_er.numerator,23 n_er.denominator + p_er.denominator)24 print(total)25 self.assertLess(total, 0.35)26 def test_validate_itself(self):27 """yields a zero error when it uses itself"""28 classifier = SentimentClassifier.build([29 'data/rt-polaritydata/rt-polarity.neg',30 'data/rt-polaritydata/rt-polarity.pos'31 ])32 c = 2 ** 733 classifier.c = c34 classifier.reset_model()35 n_er = self.validate(classifier,36 'data/rt-polaritydata/rt-polarity.neg',37 'negative')38 p_er = self.validate(classifier,39 'data/rt-polaritydata/rt-polarity.pos',40 'positive')41 total = Fraction(n_er.numerator + p_er.numerator,42 n_er.denominator + p_er.denominator)43 print(total)44 self.assertEqual(total, 0)45 def validate(self, classifier, file, sentiment):46 total = 047 misses = 048 with(io.open(file, errors='ignore')) as f:...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios, LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, i.e., Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run Behave automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful