How to use the Feature method in root

Best JavaScript code snippet using root
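Before working through the snippets below, here is a minimal sketch of the pattern they all exercise: registering a Feature through a `root` module. The `root` module name, the `Feature(name, definition)` signature, and the step-definition callbacks are assumptions inferred from the usage examples further down this page, not a documented API.

// a minimal sketch, assuming a hypothetical `root` module that exposes
// a cucumber-style Feature(name, definition) registration function
var root = require('root');

root.Feature('search', function() {
  // Step definitions are registered on `this`; callback.pending()
  // marks a step as declared but not yet implemented (assumed semantics).
  this.Given(/^I am on the homepage$/, function(callback) {
    callback.pending();
  });
});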

feature_map_generators_test.py

Source: feature_map_generators_test.py (GitHub)


# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for feature map generators."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.models import feature_map_generators
from object_detection.protos import hyperparams_pb2

INCEPTION_V2_LAYOUT = {
    'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
    'layer_depth': [-1, -1, -1, 512, 256, 256],
    'anchor_strides': [16, 32, 64, -1, -1, -1],
    'layer_target_norm': [20.0, -1, -1, -1, -1, -1],
}

INCEPTION_V3_LAYOUT = {
    'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
    'layer_depth': [-1, -1, -1, 512, 256, 128],
    'anchor_strides': [16, 32, 64, -1, -1, -1],
    'aspect_ratios': [1.0, 2.0, 1.0/2, 3.0, 1.0/3]
}

EMBEDDED_SSD_MOBILENET_V1_LAYOUT = {
    'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', ''],
    'layer_depth': [-1, -1, 512, 256, 256],
    'conv_kernel_size': [-1, -1, 3, 3, 2],
}

SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT = {
    'from_layer': ['Conv2d_13_pointwise', '', '', ''],
    'layer_depth': [-1, 256, 256, 256],
}


@parameterized.parameters(
    {'use_keras': False},
    {'use_keras': True},
)
class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):

  def _build_conv_hyperparams(self):
    conv_hyperparams = hyperparams_pb2.Hyperparams()
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
    return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)

  def _build_feature_map_generator(self, feature_map_layout, use_keras,
                                   pool_residual=False):
    if use_keras:
      return feature_map_generators.KerasMultiResolutionFeatureMaps(
          feature_map_layout=feature_map_layout,
          depth_multiplier=1,
          min_depth=32,
          insert_1x1_conv=True,
          freeze_batchnorm=False,
          is_training=True,
          conv_hyperparams=self._build_conv_hyperparams(),
          name='FeatureMaps'
      )
    else:
      def feature_map_generator(image_features):
        return feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=1,
            min_depth=32,
            insert_1x1_conv=True,
            image_features=image_features,
            pool_residual=pool_residual)
      return feature_map_generator

  def test_get_expected_feature_map_shapes_with_inception_v2(self, use_keras):
    image_features = {
        'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
        'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
        'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
    }
    feature_map_generator = self._build_feature_map_generator(
        feature_map_layout=INCEPTION_V2_LAYOUT,
        use_keras=use_keras
    )
    feature_maps = feature_map_generator(image_features)
    expected_feature_map_shapes = {
        'Mixed_3c': (4, 28, 28, 256),
        'Mixed_4c': (4, 14, 14, 576),
        'Mixed_5c': (4, 7, 7, 1024),
        'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
        'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
        'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      out_feature_maps = sess.run(feature_maps)
      out_feature_map_shapes = dict(
          (key, value.shape) for key, value in out_feature_maps.items())
      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

  def test_get_expected_feature_map_shapes_with_inception_v2_use_depthwise(
      self, use_keras):
    image_features = {
        'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
        'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
        'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
    }
    layout_copy = INCEPTION_V2_LAYOUT.copy()
    layout_copy['use_depthwise'] = True
    feature_map_generator = self._build_feature_map_generator(
        feature_map_layout=layout_copy,
        use_keras=use_keras
    )
    feature_maps = feature_map_generator(image_features)
    expected_feature_map_shapes = {
        'Mixed_3c': (4, 28, 28, 256),
        'Mixed_4c': (4, 14, 14, 576),
        'Mixed_5c': (4, 7, 7, 1024),
        'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
        'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
        'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      out_feature_maps = sess.run(feature_maps)
      out_feature_map_shapes = dict(
          (key, value.shape) for key, value in out_feature_maps.items())
      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

  def test_get_expected_feature_map_shapes_use_explicit_padding(
      self, use_keras):
    image_features = {
        'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
        'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
        'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
    }
    layout_copy = INCEPTION_V2_LAYOUT.copy()
    layout_copy['use_explicit_padding'] = True
    feature_map_generator = self._build_feature_map_generator(
        feature_map_layout=layout_copy,
        use_keras=use_keras
    )
    feature_maps = feature_map_generator(image_features)
    expected_feature_map_shapes = {
        'Mixed_3c': (4, 28, 28, 256),
        'Mixed_4c': (4, 14, 14, 576),
        'Mixed_5c': (4, 7, 7, 1024),
        'Mixed_5c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
        'Mixed_5c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
        'Mixed_5c_2_Conv2d_5_3x3_s2_256': (4, 1, 1, 256)}
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      out_feature_maps = sess.run(feature_maps)
      out_feature_map_shapes = dict(
          (key, value.shape) for key, value in out_feature_maps.items())
      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

  def test_get_expected_feature_map_shapes_with_inception_v3(self, use_keras):
    image_features = {
        'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),
        'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),
        'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32)
    }
    feature_map_generator = self._build_feature_map_generator(
        feature_map_layout=INCEPTION_V3_LAYOUT,
        use_keras=use_keras
    )
    feature_maps = feature_map_generator(image_features)
    expected_feature_map_shapes = {
        'Mixed_5d': (4, 35, 35, 256),
        'Mixed_6e': (4, 17, 17, 576),
        'Mixed_7c': (4, 8, 8, 1024),
        'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
        'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
        'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)}
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      out_feature_maps = sess.run(feature_maps)
      out_feature_map_shapes = dict(
          (key, value.shape) for key, value in out_feature_maps.items())
      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

  def test_get_expected_feature_map_shapes_with_embedded_ssd_mobilenet_v1(
      self, use_keras):
    image_features = {
        'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512],
                                                 dtype=tf.float32),
        'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
                                                 dtype=tf.float32),
    }
    feature_map_generator = self._build_feature_map_generator(
        feature_map_layout=EMBEDDED_SSD_MOBILENET_V1_LAYOUT,
        use_keras=use_keras
    )
    feature_maps = feature_map_generator(image_features)
    expected_feature_map_shapes = {
        'Conv2d_11_pointwise': (4, 16, 16, 512),
        'Conv2d_13_pointwise': (4, 8, 8, 1024),
        'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_512': (4, 4, 4, 512),
        'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 2, 2, 256),
        'Conv2d_13_pointwise_2_Conv2d_4_2x2_s2_256': (4, 1, 1, 256)}
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      out_feature_maps = sess.run(feature_maps)
      out_feature_map_shapes = dict(
          (key, value.shape) for key, value in out_feature_maps.items())
      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

  def test_feature_map_shapes_with_pool_residual_ssd_mobilenet_v1(
      self, use_keras):
    image_features = {
        'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
                                                 dtype=tf.float32),
    }
    feature_map_generator = self._build_feature_map_generator(
        feature_map_layout=SSD_MOBILENET_V1_WEIGHT_SHARED_LAYOUT,
        use_keras=use_keras,
        pool_residual=True
    )
    feature_maps = feature_map_generator(image_features)
    expected_feature_map_shapes = {
        'Conv2d_13_pointwise': (4, 8, 8, 1024),
        'Conv2d_13_pointwise_2_Conv2d_1_3x3_s2_256': (4, 4, 4, 256),
        'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_256': (4, 2, 2, 256),
        'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 1, 1, 256)}
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      out_feature_maps = sess.run(feature_maps)
      out_feature_map_shapes = dict(
          (key, value.shape) for key, value in out_feature_maps.items())
      self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)

  def test_get_expected_variable_names_with_inception_v2(self, use_keras):
    image_features = {
        'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
        'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
        'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
    }
    feature_map_generator = self._build_feature_map_generator(
        feature_map_layout=INCEPTION_V2_LAYOUT,
        use_keras=use_keras
    )
    feature_maps = feature_map_generator(image_features)
    expected_slim_variables = set([
        'Mixed_5c_1_Conv2d_3_1x1_256/weights',
        'Mixed_5c_1_Conv2d_3_1x1_256/biases',
        'Mixed_5c_2_Conv2d_3_3x3_s2_512/weights',
        'Mixed_5c_2_Conv2d_3_3x3_s2_512/biases',
        'Mixed_5c_1_Conv2d_4_1x1_128/weights',
        'Mixed_5c_1_Conv2d_4_1x1_128/biases',
        'Mixed_5c_2_Conv2d_4_3x3_s2_256/weights',
        'Mixed_5c_2_Conv2d_4_3x3_s2_256/biases',
        'Mixed_5c_1_Conv2d_5_1x1_128/weights',
        'Mixed_5c_1_Conv2d_5_1x1_128/biases',
        'Mixed_5c_2_Conv2d_5_3x3_s2_256/weights',
        'Mixed_5c_2_Conv2d_5_3x3_s2_256/biases',
    ])
    expected_keras_variables = set([
        'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/kernel',
        'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/bias',
        'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/kernel',
        'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/bias',
        'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/kernel',
        'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/bias',
        'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/kernel',
        'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/bias',
        'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/kernel',
        'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/bias',
        'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/kernel',
        'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias',
    ])
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      sess.run(feature_maps)
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
      if use_keras:
        self.assertSetEqual(expected_keras_variables, actual_variable_set)
      else:
        self.assertSetEqual(expected_slim_variables, actual_variable_set)

  def test_get_expected_variable_names_with_inception_v2_use_depthwise(
      self,
      use_keras):
    image_features = {
        'Mixed_3c': tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
        'Mixed_4c': tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
        'Mixed_5c': tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32)
    }
    layout_copy = INCEPTION_V2_LAYOUT.copy()
    layout_copy['use_depthwise'] = True
    feature_map_generator = self._build_feature_map_generator(
        feature_map_layout=layout_copy,
        use_keras=use_keras
    )
    feature_maps = feature_map_generator(image_features)
    expected_slim_variables = set([
        'Mixed_5c_1_Conv2d_3_1x1_256/weights',
        'Mixed_5c_1_Conv2d_3_1x1_256/biases',
        'Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise/depthwise_weights',
        'Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise/biases',
        'Mixed_5c_2_Conv2d_3_3x3_s2_512/weights',
        'Mixed_5c_2_Conv2d_3_3x3_s2_512/biases',
        'Mixed_5c_1_Conv2d_4_1x1_128/weights',
        'Mixed_5c_1_Conv2d_4_1x1_128/biases',
        'Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise/depthwise_weights',
        'Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise/biases',
        'Mixed_5c_2_Conv2d_4_3x3_s2_256/weights',
        'Mixed_5c_2_Conv2d_4_3x3_s2_256/biases',
        'Mixed_5c_1_Conv2d_5_1x1_128/weights',
        'Mixed_5c_1_Conv2d_5_1x1_128/biases',
        'Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise/depthwise_weights',
        'Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise/biases',
        'Mixed_5c_2_Conv2d_5_3x3_s2_256/weights',
        'Mixed_5c_2_Conv2d_5_3x3_s2_256/biases',
    ])
    expected_keras_variables = set([
        'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/kernel',
        'FeatureMaps/Mixed_5c_1_Conv2d_3_1x1_256_conv/bias',
        ('FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise_conv/'
         'depthwise_kernel'),
        ('FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_depthwise_conv/'
         'bias'),
        'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/kernel',
        'FeatureMaps/Mixed_5c_2_Conv2d_3_3x3_s2_512_conv/bias',
        'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/kernel',
        'FeatureMaps/Mixed_5c_1_Conv2d_4_1x1_128_conv/bias',
        ('FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise_conv/'
         'depthwise_kernel'),
        ('FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_depthwise_conv/'
         'bias'),
        'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/kernel',
        'FeatureMaps/Mixed_5c_2_Conv2d_4_3x3_s2_256_conv/bias',
        'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/kernel',
        'FeatureMaps/Mixed_5c_1_Conv2d_5_1x1_128_conv/bias',
        ('FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise_conv/'
         'depthwise_kernel'),
        ('FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_depthwise_conv/'
         'bias'),
        'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/kernel',
        'FeatureMaps/Mixed_5c_2_Conv2d_5_3x3_s2_256_conv/bias',
    ])
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      sess.run(feature_maps)
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
      if use_keras:
        self.assertSetEqual(expected_keras_variables, actual_variable_set)
      else:
        self.assertSetEqual(expected_slim_variables, actual_variable_set)


@parameterized.parameters({'use_native_resize_op': True, 'use_keras': False},
                          {'use_native_resize_op': False, 'use_keras': False},
                          {'use_native_resize_op': True, 'use_keras': True},
                          {'use_native_resize_op': False, 'use_keras': True})
class FPNFeatureMapGeneratorTest(tf.test.TestCase, parameterized.TestCase):

  def _build_conv_hyperparams(self):
    conv_hyperparams = hyperparams_pb2.Hyperparams()
    conv_hyperparams_text_proto = """
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
    return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)

  def _build_feature_map_generator(
      self, image_features, depth, use_keras, use_bounded_activations=False,
      use_native_resize_op=False, use_explicit_padding=False,
      use_depthwise=False):
    if use_keras:
      return feature_map_generators.KerasFpnTopDownFeatureMaps(
          num_levels=len(image_features),
          depth=depth,
          is_training=True,
          conv_hyperparams=self._build_conv_hyperparams(),
          freeze_batchnorm=False,
          use_depthwise=use_depthwise,
          use_explicit_padding=use_explicit_padding,
          use_bounded_activations=use_bounded_activations,
          use_native_resize_op=use_native_resize_op,
          scope=None,
          name='FeatureMaps',
      )
    else:
      def feature_map_generator(image_features):
        return feature_map_generators.fpn_top_down_feature_maps(
            image_features=image_features,
            depth=depth,
            use_depthwise=use_depthwise,
            use_explicit_padding=use_explicit_padding,
            use_bounded_activations=use_bounded_activations,
            use_native_resize_op=use_native_resize_op)
      return feature_map_generator

  def test_get_expected_feature_map_shapes(
      self, use_native_resize_op, use_keras):
    image_features = [
        ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
        ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
        ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
        ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
    ]
    feature_map_generator = self._build_feature_map_generator(
        image_features=image_features,
        depth=128,
        use_keras=use_keras,
        use_native_resize_op=use_native_resize_op)
    feature_maps = feature_map_generator(image_features)
    expected_feature_map_shapes = {
        'top_down_block2': (4, 8, 8, 128),
        'top_down_block3': (4, 4, 4, 128),
        'top_down_block4': (4, 2, 2, 128),
        'top_down_block5': (4, 1, 1, 128)
    }
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      out_feature_maps = sess.run(feature_maps)
      out_feature_map_shapes = {key: value.shape
                                for key, value in out_feature_maps.items()}
      self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)

  def test_get_expected_feature_map_shapes_with_explicit_padding(
      self, use_native_resize_op, use_keras):
    image_features = [
        ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
        ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
        ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
        ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
    ]
    feature_map_generator = self._build_feature_map_generator(
        image_features=image_features,
        depth=128,
        use_keras=use_keras,
        use_explicit_padding=True,
        use_native_resize_op=use_native_resize_op)
    feature_maps = feature_map_generator(image_features)
    expected_feature_map_shapes = {
        'top_down_block2': (4, 8, 8, 128),
        'top_down_block3': (4, 4, 4, 128),
        'top_down_block4': (4, 2, 2, 128),
        'top_down_block5': (4, 1, 1, 128)
    }
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      out_feature_maps = sess.run(feature_maps)
      out_feature_map_shapes = {key: value.shape
                                for key, value in out_feature_maps.items()}
      self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)

  def test_use_bounded_activations_add_operations(
      self, use_native_resize_op, use_keras):
    tf_graph = tf.Graph()
    with tf_graph.as_default():
      image_features = [('block2',
                         tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
                        ('block3',
                         tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
                        ('block4',
                         tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
                        ('block5',
                         tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))]
      feature_map_generator = self._build_feature_map_generator(
          image_features=image_features,
          depth=128,
          use_keras=use_keras,
          use_bounded_activations=True,
          use_native_resize_op=use_native_resize_op)
      feature_map_generator(image_features)
      if use_keras:
        expected_added_operations = dict.fromkeys([
            'FeatureMaps/top_down/clip_by_value/clip_by_value',
            'FeatureMaps/top_down/clip_by_value_1/clip_by_value',
            'FeatureMaps/top_down/clip_by_value_2/clip_by_value',
            'FeatureMaps/top_down/clip_by_value_3/clip_by_value',
            'FeatureMaps/top_down/clip_by_value_4/clip_by_value',
            'FeatureMaps/top_down/clip_by_value_5/clip_by_value',
            'FeatureMaps/top_down/clip_by_value_6/clip_by_value',
        ])
      else:
        expected_added_operations = dict.fromkeys([
            'top_down/clip_by_value', 'top_down/clip_by_value_1',
            'top_down/clip_by_value_2', 'top_down/clip_by_value_3',
            'top_down/clip_by_value_4', 'top_down/clip_by_value_5',
            'top_down/clip_by_value_6'
        ])
      op_names = {op.name: None for op in tf_graph.get_operations()}
      self.assertDictContainsSubset(expected_added_operations, op_names)

  def test_use_bounded_activations_clip_value(
      self, use_native_resize_op, use_keras):
    tf_graph = tf.Graph()
    with tf_graph.as_default():
      image_features = [
          ('block2', 255 * tf.ones([4, 8, 8, 256], dtype=tf.float32)),
          ('block3', 255 * tf.ones([4, 4, 4, 256], dtype=tf.float32)),
          ('block4', 255 * tf.ones([4, 2, 2, 256], dtype=tf.float32)),
          ('block5', 255 * tf.ones([4, 1, 1, 256], dtype=tf.float32))
      ]
      feature_map_generator = self._build_feature_map_generator(
          image_features=image_features,
          depth=128,
          use_keras=use_keras,
          use_bounded_activations=True,
          use_native_resize_op=use_native_resize_op)
      feature_map_generator(image_features)
      if use_keras:
        expected_clip_by_value_ops = dict.fromkeys([
            'FeatureMaps/top_down/clip_by_value/clip_by_value',
            'FeatureMaps/top_down/clip_by_value_1/clip_by_value',
            'FeatureMaps/top_down/clip_by_value_2/clip_by_value',
            'FeatureMaps/top_down/clip_by_value_3/clip_by_value',
            'FeatureMaps/top_down/clip_by_value_4/clip_by_value',
            'FeatureMaps/top_down/clip_by_value_5/clip_by_value',
            'FeatureMaps/top_down/clip_by_value_6/clip_by_value',
        ])
      else:
        expected_clip_by_value_ops = [
            'top_down/clip_by_value', 'top_down/clip_by_value_1',
            'top_down/clip_by_value_2', 'top_down/clip_by_value_3',
            'top_down/clip_by_value_4', 'top_down/clip_by_value_5',
            'top_down/clip_by_value_6'
        ]
      # Gathers activation tensors before and after clip_by_value operations.
      activations = {}
      for clip_by_value_op in expected_clip_by_value_ops:
        clip_input_tensor = tf_graph.get_operation_by_name(
            '{}/Minimum'.format(clip_by_value_op)).inputs[0]
        clip_output_tensor = tf_graph.get_tensor_by_name(
            '{}:0'.format(clip_by_value_op))
        activations.update({
            'before_{}'.format(clip_by_value_op): clip_input_tensor,
            'after_{}'.format(clip_by_value_op): clip_output_tensor,
        })
      expected_lower_bound = -feature_map_generators.ACTIVATION_BOUND
      expected_upper_bound = feature_map_generators.ACTIVATION_BOUND
      init_op = tf.global_variables_initializer()
      with self.test_session() as session:
        session.run(init_op)
        activations_output = session.run(activations)
        for clip_by_value_op in expected_clip_by_value_ops:
          # Before clipping, activations are beyond the expected bound because
          # of large input image_features values.
          activations_before_clipping = (
              activations_output['before_{}'.format(clip_by_value_op)])
          before_clipping_lower_bound = np.amin(activations_before_clipping)
          before_clipping_upper_bound = np.amax(activations_before_clipping)
          self.assertLessEqual(before_clipping_lower_bound,
                               expected_lower_bound)
          self.assertGreaterEqual(before_clipping_upper_bound,
                                  expected_upper_bound)
          # After clipping, activations are bounded as expected.
          activations_after_clipping = (
              activations_output['after_{}'.format(clip_by_value_op)])
          after_clipping_lower_bound = np.amin(activations_after_clipping)
          after_clipping_upper_bound = np.amax(activations_after_clipping)
          self.assertGreaterEqual(after_clipping_lower_bound,
                                  expected_lower_bound)
          self.assertLessEqual(after_clipping_upper_bound, expected_upper_bound)

  def test_get_expected_feature_map_shapes_with_depthwise(
      self, use_native_resize_op, use_keras):
    image_features = [
        ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
        ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
        ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
        ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
    ]
    feature_map_generator = self._build_feature_map_generator(
        image_features=image_features,
        depth=128,
        use_keras=use_keras,
        use_depthwise=True,
        use_native_resize_op=use_native_resize_op)
    feature_maps = feature_map_generator(image_features)
    expected_feature_map_shapes = {
        'top_down_block2': (4, 8, 8, 128),
        'top_down_block3': (4, 4, 4, 128),
        'top_down_block4': (4, 2, 2, 128),
        'top_down_block5': (4, 1, 1, 128)
    }
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      out_feature_maps = sess.run(feature_maps)
      out_feature_map_shapes = {key: value.shape
                                for key, value in out_feature_maps.items()}
      self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)

  def test_get_expected_variable_names(
      self, use_native_resize_op, use_keras):
    image_features = [
        ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
        ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
        ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
        ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
    ]
    feature_map_generator = self._build_feature_map_generator(
        image_features=image_features,
        depth=128,
        use_keras=use_keras,
        use_native_resize_op=use_native_resize_op)
    feature_maps = feature_map_generator(image_features)
    expected_slim_variables = set([
        'projection_1/weights',
        'projection_1/biases',
        'projection_2/weights',
        'projection_2/biases',
        'projection_3/weights',
        'projection_3/biases',
        'projection_4/weights',
        'projection_4/biases',
        'smoothing_1/weights',
        'smoothing_1/biases',
        'smoothing_2/weights',
        'smoothing_2/biases',
        'smoothing_3/weights',
        'smoothing_3/biases',
    ])
    expected_keras_variables = set([
        'FeatureMaps/top_down/projection_1/kernel',
        'FeatureMaps/top_down/projection_1/bias',
        'FeatureMaps/top_down/projection_2/kernel',
        'FeatureMaps/top_down/projection_2/bias',
        'FeatureMaps/top_down/projection_3/kernel',
        'FeatureMaps/top_down/projection_3/bias',
        'FeatureMaps/top_down/projection_4/kernel',
        'FeatureMaps/top_down/projection_4/bias',
        'FeatureMaps/top_down/smoothing_1_conv/kernel',
        'FeatureMaps/top_down/smoothing_1_conv/bias',
        'FeatureMaps/top_down/smoothing_2_conv/kernel',
        'FeatureMaps/top_down/smoothing_2_conv/bias',
        'FeatureMaps/top_down/smoothing_3_conv/kernel',
        'FeatureMaps/top_down/smoothing_3_conv/bias'
    ])
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      sess.run(feature_maps)
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
      if use_keras:
        self.assertSetEqual(expected_keras_variables, actual_variable_set)
      else:
        self.assertSetEqual(expected_slim_variables, actual_variable_set)

  def test_get_expected_variable_names_with_depthwise(
      self, use_native_resize_op, use_keras):
    image_features = [
        ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
        ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
        ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
        ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
    ]
    feature_map_generator = self._build_feature_map_generator(
        image_features=image_features,
        depth=128,
        use_keras=use_keras,
        use_depthwise=True,
        use_native_resize_op=use_native_resize_op)
    feature_maps = feature_map_generator(image_features)
    expected_slim_variables = set([
        'projection_1/weights',
        'projection_1/biases',
        'projection_2/weights',
        'projection_2/biases',
        'projection_3/weights',
        'projection_3/biases',
        'projection_4/weights',
        'projection_4/biases',
        'smoothing_1/depthwise_weights',
        'smoothing_1/pointwise_weights',
        'smoothing_1/biases',
        'smoothing_2/depthwise_weights',
        'smoothing_2/pointwise_weights',
        'smoothing_2/biases',
        'smoothing_3/depthwise_weights',
        'smoothing_3/pointwise_weights',
        'smoothing_3/biases',
    ])
    expected_keras_variables = set([
        'FeatureMaps/top_down/projection_1/kernel',
        'FeatureMaps/top_down/projection_1/bias',
        'FeatureMaps/top_down/projection_2/kernel',
        'FeatureMaps/top_down/projection_2/bias',
        'FeatureMaps/top_down/projection_3/kernel',
        'FeatureMaps/top_down/projection_3/bias',
        'FeatureMaps/top_down/projection_4/kernel',
        'FeatureMaps/top_down/projection_4/bias',
        'FeatureMaps/top_down/smoothing_1_depthwise_conv/depthwise_kernel',
        'FeatureMaps/top_down/smoothing_1_depthwise_conv/pointwise_kernel',
        'FeatureMaps/top_down/smoothing_1_depthwise_conv/bias',
        'FeatureMaps/top_down/smoothing_2_depthwise_conv/depthwise_kernel',
        'FeatureMaps/top_down/smoothing_2_depthwise_conv/pointwise_kernel',
        'FeatureMaps/top_down/smoothing_2_depthwise_conv/bias',
        'FeatureMaps/top_down/smoothing_3_depthwise_conv/depthwise_kernel',
        'FeatureMaps/top_down/smoothing_3_depthwise_conv/pointwise_kernel',
        'FeatureMaps/top_down/smoothing_3_depthwise_conv/bias'
    ])
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      sess.run(feature_maps)
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
      if use_keras:
        self.assertSetEqual(expected_keras_variables, actual_variable_set)
      else:
        self.assertSetEqual(expected_slim_variables, actual_variable_set)


class GetDepthFunctionTest(tf.test.TestCase):

  def test_return_min_depth_when_multiplier_is_small(self):
    depth_fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5,
                                                   min_depth=16)
    self.assertEqual(depth_fn(16), 16)

  def test_return_correct_depth_with_multiplier(self):
    depth_fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5,
                                                   min_depth=16)
    self.assertEqual(depth_fn(64), 32)


@parameterized.parameters(
    {'replace_pool_with_conv': False},
    {'replace_pool_with_conv': True},
)
class PoolingPyramidFeatureMapGeneratorTest(tf.test.TestCase):

  def test_get_expected_feature_map_shapes(self, replace_pool_with_conv):
    image_features = {
        'image_features': tf.random_uniform([4, 19, 19, 1024])
    }
    feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
        base_feature_map_depth=1024,
        num_layers=6,
        image_features=image_features,
        replace_pool_with_conv=replace_pool_with_conv)
    expected_pool_feature_map_shapes = {
        'Base_Conv2d_1x1_1024': (4, 19, 19, 1024),
        'MaxPool2d_0_2x2': (4, 10, 10, 1024),
        'MaxPool2d_1_2x2': (4, 5, 5, 1024),
        'MaxPool2d_2_2x2': (4, 3, 3, 1024),
        'MaxPool2d_3_2x2': (4, 2, 2, 1024),
        'MaxPool2d_4_2x2': (4, 1, 1, 1024),
    }
    expected_conv_feature_map_shapes = {
        'Base_Conv2d_1x1_1024': (4, 19, 19, 1024),
        'Conv2d_0_3x3_s2_1024': (4, 10, 10, 1024),
        'Conv2d_1_3x3_s2_1024': (4, 5, 5, 1024),
        'Conv2d_2_3x3_s2_1024': (4, 3, 3, 1024),
        'Conv2d_3_3x3_s2_1024': (4, 2, 2, 1024),
        'Conv2d_4_3x3_s2_1024': (4, 1, 1, 1024),
    }
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      out_feature_maps = sess.run(feature_maps)
      out_feature_map_shapes = {key: value.shape
                                for key, value in out_feature_maps.items()}
      if replace_pool_with_conv:
        self.assertDictEqual(expected_conv_feature_map_shapes,
                             out_feature_map_shapes)
      else:
        self.assertDictEqual(expected_pool_feature_map_shapes,
                             out_feature_map_shapes)

  def test_get_expected_variable_names(self, replace_pool_with_conv):
    image_features = {
        'image_features': tf.random_uniform([4, 19, 19, 1024])
    }
    feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
        base_feature_map_depth=1024,
        num_layers=6,
        image_features=image_features,
        replace_pool_with_conv=replace_pool_with_conv)
    expected_pool_variables = set([
        'Base_Conv2d_1x1_1024/weights',
        'Base_Conv2d_1x1_1024/biases',
    ])
    expected_conv_variables = set([
        'Base_Conv2d_1x1_1024/weights',
        'Base_Conv2d_1x1_1024/biases',
        'Conv2d_0_3x3_s2_1024/weights',
        'Conv2d_0_3x3_s2_1024/biases',
        'Conv2d_1_3x3_s2_1024/weights',
        'Conv2d_1_3x3_s2_1024/biases',
        'Conv2d_2_3x3_s2_1024/weights',
        'Conv2d_2_3x3_s2_1024/biases',
        'Conv2d_3_3x3_s2_1024/weights',
        'Conv2d_3_3x3_s2_1024/biases',
        'Conv2d_4_3x3_s2_1024/weights',
        'Conv2d_4_3x3_s2_1024/biases',
    ])
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      sess.run(feature_maps)
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
      if replace_pool_with_conv:
        self.assertSetEqual(expected_conv_variables, actual_variable_set)
      else:
        self.assertSetEqual(expected_pool_variables, actual_variable_set)


if __name__ == '__main__':
...


pif_calculator.py

Source: pif_calculator.py (GitHub)


import collections
import math
import operator
import random
import itertools
import pandas as pd
import numpy as np
from piflib.data_util import calculate_distribution, complete_feature_priors
from piflib.entropy import create_conditional_entropy_table


def compute_cigs(dataframe,
                 feature_priors={},
                 feature_accuracies={},
                 samples=None):
    """Compute the cell information gain (CIG) for all cells in the dataset.

    Find the risk (as KL divergence from prior) for all attributes.

    :param dataframe: a Pandas DataFrame object containing tabular data
    :param feature_priors: feature_priors are optional. It is a dictionary mapping the
        feature index to an assumed prior. If not provided, the prior for
        the feature is calculated from the global distribution.
    :param feature_accuracies: `feature_accuracies` maps the feature index to the accuracy of the
        feature. If not provided for a feature, it defaults to 1.
    :return: a Pandas DataFrame containing the CIG values. The CIG values are at the same index as their
        corresponding cell values in the input dataframe.
    """
    unknown_features = 1
    dataset = dataframe.values
    num_features = len(dataset[0])
    assert all(len(row) == num_features for row in dataset)
    feature_priors = complete_feature_priors(dataframe, feature_priors)
    feature_accuracies = feature_accuracies.copy()
    for i in range(num_features):
        if i not in feature_accuracies:
            feature_accuracies[i] = 1
    feature_counts = [0] * num_features
    feature_kls = [[0] * len(dataset) for _ in range(num_features)]
    for is_ in sample_is(num_features, unknown_features, samples):
        feature_kls_this = find_kls_for_features(
            dataset,
            is_,
            feature_priors,
            feature_accuracies)
        for i, feature_kl in zip(is_, feature_kls_this):
            feature_kl_previous = feature_kls[i]
            feature_kls[i] = tuple(map(
                operator.add, feature_kl, feature_kl_previous))
        for i in is_:
            feature_counts[i] += 1
    for i, denom in enumerate(feature_counts):
        feature_kls[i] = tuple(map(
            operator.truediv,
            feature_kls[i],
            itertools.repeat(denom)))
    return pd.DataFrame(list(zip(*feature_kls)), columns=dataframe.columns)


def compute_weighted_cigs(dataframe, feature_priors={}, feature_accuracies={}):
    """Compute the Weighted Cell Information Gain (wCIG) for all cells in the dataset.

    Find the risk (as KL divergence from prior) for all attributes.

    :param dataframe: a Pandas DataFrame object containing tabular data
    :param feature_priors: feature_priors are optional. It is a dictionary mapping the
        feature index to an assumed prior. If not provided, the prior for
        the feature is calculated from the global distribution.
    :param feature_accuracies: `feature_accuracies` maps the feature index to the accuracy of the
        feature. If not provided for a feature, it defaults to 1.
    :return: a Pandas DataFrame containing the wCIG values. The wCIG values are at the same index as their
        corresponding cell values in the input dataframe.
    """
    cigs = compute_cigs(dataframe, feature_priors=feature_priors, feature_accuracies=feature_accuracies)
    cond_entropy = create_conditional_entropy_table(dataframe)
    weights = cond_entropy['H(X|Y)'].values / cond_entropy['H(X)']
    weights = np.nan_to_num(weights)
    return (cigs * weights).round(2)


def compute_csfs(df, feature_priors={}, feature_accuracies={}):
    """Compute the Cell Surprise Factor (CSF) for all cells in the dataset.

    The CSF is defined as the change in probability for a cell value between the prior and the posterior distribution.

    :param df: a Pandas DataFrame object containing tabular data
    :param feature_priors: feature_priors are optional. It is a dictionary mapping the
        feature index to an assumed prior. If not provided, the prior for
        the feature is calculated from the global distribution.
    :param feature_accuracies: `feature_accuracies` maps the feature index to the accuracy of the
        feature. If not provided for a feature, it defaults to 1.
    :return: a Pandas DataFrame containing the CSF values. The CSF values are at the same index as their
        corresponding cell values in the input dataframe.
    """
    dataset = df.values
    num_features = len(dataset[0])
    # compute priors
    feature_priors = complete_feature_priors(df, feature_priors)
    feature_accuracies = feature_accuracies.copy()
    for i in range(num_features):
        if i not in feature_accuracies:
            feature_accuracies[i] = 1
    feature_csfs = [[0] * len(dataset) for _ in range(num_features)]
    for is_ in sample_is(num_features, 1, None):
        feature_csfs[is_[0]] = apply_to_posterior_and_prior(
            dataset,
            is_,
            feature_priors,
            feature_accuracies,
            calculate_prob_change)
    return pd.DataFrame(list(zip(*feature_csfs)), columns=df.columns)


def compute_pif(cigs, percentile):
    """Compute the PIF.

    The PIF is defined as the n-th percentile of the individual RIG values. In other words, the RIG of n percent of
    the entities in the dataset does not exceed the PIF value.
    RIG stands for row information gain. It represents the overall information gain for an entity in the dataset. The
    RIG is computed by summing the CIG values of an entity.
    The percentile value can be chosen between 0 and 100. 100 will return the maximum RIG value. Often, the RIG values
    form a long-tailed distribution with a few high-value outliers. Choosing a percentile value lower than 100 will
    ignore (some of) the highest values. If ignoring the risk of some entities in the dataset fits within your risk
    framework, then specifying a percentile value of less than 100 will make the PIF value less susceptible to RIG
    outliers.

    :param cigs: The CIG values of the dataset (see the compute_cigs function in this module)
    :param percentile: Which percentile of RIG values should be included in the PIF.
    :returns: the PIF_percentile value of the given CIGs
    """
    rigs = cigs.sum(axis=1)
    pif = np.percentile(rigs, percentile)
    return pif


def compute_posterior_distributions(feature, df):
    known_features = tuple(col_name for col_name in df.columns if col_name != feature)
    bucket = collections.defaultdict(list)
    bucket_map = []
    for idx, row in df.iterrows():
        key = tuple(row[known_feature] for known_feature in known_features)
        bucket[key].append(row[feature])
        bucket_map.append(key)
    bucket_distributions = {key: calculate_distribution(el_bucket) for key, el_bucket in bucket.items()}
    feature_vals = df[feature].unique()
    dists = {}
    for key, distribution in bucket_distributions.items():
        dists[str(key)] = [distribution.get(feature_val, 0) for feature_val in feature_vals]
    return dists, feature_vals


def binom(n, r):
    """Return the binomial coefficient: n choose r."""
    return math.factorial(n) // math.factorial(n - r) // math.factorial(r)


def sample_is(n, r, samples):
    if samples is None:
        yield from itertools.combinations(range(n), r)
    else:
        total_combinations = binom(n, r)
        if samples > total_combinations:
            raise ValueError('more samples than combinations')
        if samples >= total_combinations >> 1:
            all_combinations = list(itertools.combinations(range(n), r))
            random.shuffle(all_combinations)
            num_produced = 0
            feature_produced = [False] * n
            for i, comb in enumerate(all_combinations):
                if num_produced >= samples:
                    break
                if all(map(feature_produced.__getitem__, comb)):
                    continue
                for j in comb:
                    feature_produced[j] = True
                num_produced += 1
                all_combinations[i] = None
                yield comb
            for comb in all_combinations:
                if num_produced >= samples:
                    break
                if comb is not None:
                    yield comb
        else:
            already_produced = set()
            feature_produced = [False] * n
            while len(already_produced) < samples:
                comb = random.sample(range(n), r)
                comb = tuple(sorted(comb))
                if (comb not in already_produced
                        and (all(already_produced)
                             or not all(map(already_produced.__getitem__,
                                            comb)))):
                    already_produced.add(comb)
                    for i in comb:
                        feature_produced[i] = True
                    yield comb


def apply_to_posterior_and_prior(dataset, feature_idx, prior_distributions, accuracies, fun):
    num_features = len(dataset[0])
    assert all(len(row) == num_features for row in dataset)
    feature_idx = feature_idx[0]
    buckets = collections.defaultdict(list)
    bucket_map = []
    for row in dataset:
        key = tuple(row[i] for i in range(num_features) if not i == feature_idx)
        buckets[key].append(row[feature_idx])
        bucket_map.append((key, row[feature_idx]))
    bucket_values = {
        key: fun(
            calculate_distribution(bucket,
                                   accuracy=accuracies[feature_idx],
                                   feature_distribution=prior_distributions[feature_idx]),
            prior_distributions[feature_idx])
        for key, bucket in buckets.items()}
    return [bucket_values[post_key][val] for post_key, val in bucket_map]


def find_kls_for_features(dataset, feature_is, feature_distributions, accuracies):
    """Find the KL divergence of feature values against the prior.

    We find the true distribution of the features taking into account
    the accuracy. We then compute the KL divergence.
    """
    num_features = len(dataset[0])
    assert all(len(row) == num_features for row in dataset)
    # one bucket per set of 'known' features
    buckets = [collections.defaultdict(list) for _ in range(len(feature_is))]
    bucket_map = [[] for _ in range(len(feature_is))]
    for row in dataset:
        key = tuple(row[i] for i in range(num_features) if i not in feature_is)
        for i, j in enumerate(feature_is):
            buckets[i][key].append(row[j])
            bucket_map[i].append(key)
    bucket_kls = [
        {
            key: calculate_kl(
                calculate_distribution(bucket,
                                       accuracy=accuracies[feature_is[i]],
                                       feature_distribution=feature_distributions[feature_is[i]]),
                feature_distributions[feature_is[i]])
            for key, bucket in feature_buckets.items()}
        for i, feature_buckets in enumerate(buckets)]
    return [tuple(map(bucket_kls[i].__getitem__, bucket_map[i]))
            for i in range(len(feature_is))]


def calculate_kl(p, q):
    """Calculate D_KL(P || Q) (the KL-divergence) in bits.

    D_KL(P || Q) is the `information gained when one revises one's
    beliefs from the prior probability distribution Q to the posterior
    probability distribution P`. (Wikipedia, Kullback-Leibler
    divergence)

    `p` and `q` are both dictionaries mapping some hashable to a number.
    It is assumed that they are both normalised: their values should add
    up to 1. `q` must not have any 0 values unless the corresponding `p`
    value is also 0.
    """
    return sum(pk * math.log2(pk / q[k])
               for k, pk in p.items()
               if pk > 0)


def calculate_prob_change(p, q):
    """Calculate the change in probability for each element of the posterior compared to the prior."""
...


trip_quad.py

Source: trip_quad.py (GitHub)


# Scores triplets and quadruples of visual concepts (VCs): for each
# combination, the corresponding pool3 feature locations are occluded and
# the drop in the classifier's probability is measured.
from __future__ import division
import pickle
import numpy as np
import cv2
import math
import sys
from sys import argv
from copy import *
from ProjectUtils import *
from GetDataPath import *
from testvgg import TestVgg
sys.path.insert(0, './')
sys.path.append('/home/xuyangf/project/ML_deliverables/Siamese_iclr17_tf-master/src/')
import network as vgg
from feature_extractor import FeatureExtractor
from utils import *
# import importlib
# importlib.reload(sys)
reload(sys)
sys.setdefaultencoding('utf8')

cat = argv[1]
cluster_num = 256
myimage_path = LoadImage(cat)
image_path = []
for mypath in myimage_path:
    myimg = cv2.imread(mypath, cv2.IMREAD_UNCHANGED)
    if(max(myimg.shape[0], myimg.shape[1]) > 100):
        image_path.append(mypath)
img_num = len(image_path)
layer_name = 'pool3'
file_path = '/data2/xuyangf/OcclusionProject/NaiveVersion/feature/feature3/L3Feature'+cat
#cluster_file = '/data2/xuyangf/OcclusionProject/NaiveVersion/cluster/clusterL3/vgg16_'+cat+'_K'+str(cluster_num)+'.pickle'
prun_file = '/data2/xuyangf/OcclusionProject/NaiveVersion/prunning/prunL3/dictionary_'+cat+'.pickle'
trip_save_file = '/data2/xuyangf/OcclusionProject/NaiveVersion/vc_combinescore/3vc/cat'+cat
quad_save_file = '/data2/xuyangf/OcclusionProject/NaiveVersion/vc_combinescore/4vc/cat'+cat

print('loading data...')
# number of files to read in
file_num = 10
maximg_cnt = 20000
fname = file_path+str(0)+'.npz'
ff = np.load(fname)
feat_dim = ff['res'].shape[0]
img_cnt = ff['res'].shape[1]
oldimg_index = 0
originimage = []
feat_set = np.zeros((feat_dim, maximg_cnt*file_num))
feat_set[:, 0:img_cnt] = ff['res']
originimage += list(ff['originpath'])
loc_dim = ff['loc_set'].shape[1]
print(loc_dim)
loc_set = np.zeros((maximg_cnt*file_num, loc_dim))
loc_set[0:img_cnt, :] = ff['loc_set']
#img_dim = ff['img_set'].shape[1:]
#img_set = np.zeros([maximg_cnt*file_num]+list(img_dim))
#img_set[0:img_cnt] = ff['img_set']
oldimg_index += img_cnt
for ii in range(1, file_num):
    print(ii)
    fname = file_path+str(ii)+'.npz'
    ff = np.load(fname)
    originimage += list(ff['originpath'])
    img_cnt = ff['res'].shape[1]
    print(img_cnt)
    feat_set[:, oldimg_index:(oldimg_index + img_cnt)] = ff['res']
    loc_set[oldimg_index:(oldimg_index + img_cnt), :] = ff['loc_set']
    #img_set[oldimg_index:(oldimg_index + img_cnt)] = ff['img_set']
    oldimg_index += img_cnt
feat_set = feat_set[:, :oldimg_index]
#img_set = img_set[:oldimg_index]
loc_set = loc_set[:oldimg_index, :]
print('all feat_set')
print(feat_set.shape)
print('all img_set')
#print(img_set.shape)
#assert(len(originimage)==len(img_set))

with open(prun_file, 'rb') as fh:
    assignment, centers, _, norm = pickle.load(fh)
print('load finish')
predictor = TestVgg()

# def GetPossDecrease(original_img,occluded_img,category):
#     originalp=predictor.getprob(original_img,category)
#     occludedp=predictor.getprob(occluded_img,category)
#     print('diff')
#     print(originalp)
#     print(occludedp)
#     #0.3 magic number
#     if(originalp<0.3):
#         return -1
#     if(originalp<occludedp):
#         return -1
#     return float(originalp-occludedp)/originalp

def GetPossDecrease(original_feature, occluded_feature, category):
    originalp = predictor.feature2result(original_feature, category)
    occludedp = predictor.feature2result(occluded_feature, category)
    print('diff')
    print(originalp)
    print(occludedp)
    #0.3 magic number
    if(originalp < 0.3):
        return -1
    # if(originalp<occludedp):
    #     return -1
    return float(originalp-occludedp)/originalp

def disttresh(input_index, cluster_center):
    thresh1 = 0.8
    temp_feat = feat_set[:, input_index]
    error = np.sum((temp_feat.T - cluster_center)**2, 1)
    sort_idx = np.argsort(error)
    return input_index[sort_idx[:int(thresh1*len(sort_idx))]]

fname = '/data2/xuyangf/OcclusionProject/NaiveVersion/vc_combinescore/2vc/cat'+str(cat)+'.npz'
ff = np.load(fname)
img_vc_avg = ff['vc_score']
vc_name = ff['vc']
vc_num = len(img_vc_avg)
img_vc_avg = np.array(img_vc_avg)
vc_name = np.array(vc_name)
b = np.argsort(-img_vc_avg)
trip_img_vc_score = []
trip_img_vc = []
quad_img_vc_score = []
quad_img_vc = []
topvc = b[:25]
# Combine pairs of top-scoring VC pairs into unique triplets and quadruples.
for indexk in topvc:
    thename = vc_name[indexk]
    k1 = int(thename[0])
    k2 = int(thename[1])
    for indexkk in topvc:
        if indexk <= indexkk:
            continue
        thename2 = vc_name[indexkk]
        k3 = int(thename2[0])
        k4 = int(thename2[1])
        myk = []
        myk.append(k1)
        myk.append(k2)
        myk.append(k3)
        myk.append(k4)
        myk = set(myk)
        myk = list(myk)
        if len(myk) == 3:
            trip_img_vc.append(str(myk[0])+'_'+str(myk[1])+'_'+str(myk[2]))
        else:
            quad_img_vc.append(str(myk[0])+'_'+str(myk[1])+'_'+str(myk[2])+'_'+str(myk[3]))
trip_img_vc = list(set(trip_img_vc))
quad_img_vc = list(set(quad_img_vc))

for s in trip_img_vc:
    dot1 = s.find('_')
    k1 = int(s[:dot1])
    s2 = s[dot1+1:]
    dot2 = s2.find('_')
    k2 = int(s2[:dot2])
    s3 = s2[dot2+1:]
    k3 = int(s3)
    target1 = centers[k1]
    index1 = np.where(assignment == k1)[0]
    index1 = disttresh(index1, target1)
    target2 = centers[k2]
    index2 = np.where(assignment == k2)[0]
    index2 = disttresh(index2, target2)
    target3 = centers[k3]
    index3 = np.where(assignment == k3)[0]
    index3 = disttresh(index3, target3)
    myscore = []
    for n in range(0, img_num):
        myindex1 = []
        for i in range(len(index1)):
            if image_path[n] == originimage[index1[i]]:
                myindex1.append(index1[i])
        #myindex=OnlyTheClosest(myindex,target), or other preprocessing method
        myindex2 = []
        for i in range(len(index2)):
            if image_path[n] == originimage[index2[i]]:
                myindex2.append(index2[i])
        myindex3 = []
        for i in range(len(index3)):
            if image_path[n] == originimage[index3[i]]:
                myindex3.append(index3[i])
        if len(myindex1) == 0:
            continue
        if len(myindex2) == 0:
            continue
        if len(myindex3) == 0:
            continue
        myindex = myindex1+myindex2+myindex3
        original_img = cv2.imread(image_path[n], cv2.IMREAD_UNCHANGED)
        original_feature = predictor.img2feature(original_img)
        occluded_feature = deepcopy(original_feature)
        for i in range(len(myindex)):
            fhi = int(loc_set[myindex[i]][7])
            fwi = int(loc_set[myindex[i]][8])

            # Gaussian occlusion: zero the centre and attenuate neighbours
            occluded_feature[0][fhi][fwi] = 0
            occluded_feature[0][fhi-1][fwi] = 0.25*occluded_feature[0][fhi-1][fwi]
            occluded_feature[0][fhi][fwi-1] = 0.25*occluded_feature[0][fhi][fwi-1]
            occluded_feature[0][fhi+1][fwi] = 0.25*occluded_feature[0][fhi+1][fwi]
            occluded_feature[0][fhi][fwi+1] = 0.25*occluded_feature[0][fhi][fwi+1]
            occluded_feature[0][fhi-1][fwi-1] = 0.375*occluded_feature[0][fhi-1][fwi-1]
            occluded_feature[0][fhi+1][fwi-1] = 0.375*occluded_feature[0][fhi+1][fwi-1]
            occluded_feature[0][fhi+1][fwi+1] = 0.375*occluded_feature[0][fhi+1][fwi+1]
            occluded_feature[0][fhi-1][fwi+1] = 0.375*occluded_feature[0][fhi-1][fwi+1]

            occluded_feature[0][fhi+2][fwi] = 0.625*occluded_feature[0][fhi+2][fwi]
            occluded_feature[0][fhi-2][fwi] = 0.625*occluded_feature[0][fhi-2][fwi]
            occluded_feature[0][fhi][fwi-2] = 0.625*occluded_feature[0][fhi][fwi-2]
            occluded_feature[0][fhi][fwi+2] = 0.625*occluded_feature[0][fhi][fwi+2]
            occluded_feature[0][fhi-1][fwi-2] = 0.75*occluded_feature[0][fhi-1][fwi-2]
            occluded_feature[0][fhi-1][fwi+2] = 0.75*occluded_feature[0][fhi-1][fwi+2]
            occluded_feature[0][fhi+1][fwi-2] = 0.75*occluded_feature[0][fhi+1][fwi-2]
            occluded_feature[0][fhi+1][fwi+2] = 0.75*occluded_feature[0][fhi+1][fwi+2]
            occluded_feature[0][fhi-2][fwi-1] = 0.75*occluded_feature[0][fhi-2][fwi-1]
            occluded_feature[0][fhi-2][fwi+1] = 0.75*occluded_feature[0][fhi-2][fwi+1]
            occluded_feature[0][fhi+2][fwi-1] = 0.75*occluded_feature[0][fhi+2][fwi-1]
            occluded_feature[0][fhi+2][fwi+1] = 0.75*occluded_feature[0][fhi+2][fwi+1]
            occluded_feature[0][fhi-2][fwi-2] = 0.875*occluded_feature[0][fhi-2][fwi-2]
            occluded_feature[0][fhi-2][fwi+2] = 0.875*occluded_feature[0][fhi-2][fwi+2]
            occluded_feature[0][fhi+2][fwi-2] = 0.875*occluded_feature[0][fhi+2][fwi-2]
            occluded_feature[0][fhi+2][fwi+2] = 0.875*occluded_feature[0][fhi+2][fwi+2]
        # print(hi)
        # print(wi)
        # print(patch_size)
        drop = GetPossDecrease(original_feature, occluded_feature, int(cat))
        if drop != -1:
            myscore.append(drop)
    trip_img_vc_score.append(float(np.sum(myscore))/img_num)
np.savez(trip_save_file, vc_score=trip_img_vc_score, vc=trip_img_vc)

for s in quad_img_vc:
    dot1 = s.find('_')
    k1 = int(s[:dot1])
    s2 = s[dot1+1:]
    dot2 = s2.find('_')
    k2 = int(s2[:dot2])
    s3 = s2[dot2+1:]
    dot3 = s3.find('_')
    k3 = int(s3[:dot3])
    s4 = s3[dot3+1:]
    k4 = int(s4)
    target1 = centers[k1]
    index1 = np.where(assignment == k1)[0]
    index1 = disttresh(index1, target1)
    target2 = centers[k2]
    index2 = np.where(assignment == k2)[0]
    index2 = disttresh(index2, target2)
    target3 = centers[k3]
    index3 = np.where(assignment == k3)[0]
    index3 = disttresh(index3, target3)
    target4 = centers[k4]
    index4 = np.where(assignment == k4)[0]
    index4 = disttresh(index4, target4)
    myscore = []
    for n in range(0, img_num):
        myindex1 = []
        for i in range(len(index1)):
            if image_path[n] == originimage[index1[i]]:
                myindex1.append(index1[i])
        #myindex=OnlyTheClosest(myindex,target), or other preprocessing method
        myindex2 = []
        for i in range(len(index2)):
            if image_path[n] == originimage[index2[i]]:
                myindex2.append(index2[i])
        myindex3 = []
        for i in range(len(index3)):
            if image_path[n] == originimage[index3[i]]:
                myindex3.append(index3[i])
        myindex4 = []
        for i in range(len(index4)):
            if image_path[n] == originimage[index4[i]]:
                myindex4.append(index4[i])
        if len(myindex1) == 0:
            continue
        if len(myindex2) == 0:
            continue
        if len(myindex3) == 0:
            continue
        if len(myindex4) == 0:
            continue
        myindex = myindex1+myindex2+myindex3+myindex4
        original_img = cv2.imread(image_path[n], cv2.IMREAD_UNCHANGED)
        original_feature = predictor.img2feature(original_img)
        occluded_feature = deepcopy(original_feature)
        for i in range(len(myindex)):
            fhi = int(loc_set[myindex[i]][7])
            fwi = int(loc_set[myindex[i]][8])

            # Gaussian occlusion: zero the centre and attenuate neighbours
            occluded_feature[0][fhi][fwi] = 0
            occluded_feature[0][fhi-1][fwi] = 0.25*occluded_feature[0][fhi-1][fwi]
            occluded_feature[0][fhi][fwi-1] = 0.25*occluded_feature[0][fhi][fwi-1]
            occluded_feature[0][fhi+1][fwi] = 0.25*occluded_feature[0][fhi+1][fwi]
            occluded_feature[0][fhi][fwi+1] = 0.25*occluded_feature[0][fhi][fwi+1]
            occluded_feature[0][fhi-1][fwi-1] = 0.375*occluded_feature[0][fhi-1][fwi-1]
            occluded_feature[0][fhi+1][fwi-1] = 0.375*occluded_feature[0][fhi+1][fwi-1]
            occluded_feature[0][fhi+1][fwi+1] = 0.375*occluded_feature[0][fhi+1][fwi+1]
            occluded_feature[0][fhi-1][fwi+1] = 0.375*occluded_feature[0][fhi-1][fwi+1]

            occluded_feature[0][fhi+2][fwi] = 0.625*occluded_feature[0][fhi+2][fwi]
            occluded_feature[0][fhi-2][fwi] = 0.625*occluded_feature[0][fhi-2][fwi]
            occluded_feature[0][fhi][fwi-2] = 0.625*occluded_feature[0][fhi][fwi-2]
            occluded_feature[0][fhi][fwi+2] = 0.625*occluded_feature[0][fhi][fwi+2]
            occluded_feature[0][fhi-1][fwi-2] = 0.75*occluded_feature[0][fhi-1][fwi-2]
            occluded_feature[0][fhi-1][fwi+2] = 0.75*occluded_feature[0][fhi-1][fwi+2]
            occluded_feature[0][fhi+1][fwi-2] = 0.75*occluded_feature[0][fhi+1][fwi-2]
            occluded_feature[0][fhi+1][fwi+2] = 0.75*occluded_feature[0][fhi+1][fwi+2]
            occluded_feature[0][fhi-2][fwi-1] = 0.75*occluded_feature[0][fhi-2][fwi-1]
            occluded_feature[0][fhi-2][fwi+1] = 0.75*occluded_feature[0][fhi-2][fwi+1]
            occluded_feature[0][fhi+2][fwi-1] = 0.75*occluded_feature[0][fhi+2][fwi-1]
            occluded_feature[0][fhi+2][fwi+1] = 0.75*occluded_feature[0][fhi+2][fwi+1]
            occluded_feature[0][fhi-2][fwi-2] = 0.875*occluded_feature[0][fhi-2][fwi-2]
            occluded_feature[0][fhi-2][fwi+2] = 0.875*occluded_feature[0][fhi-2][fwi+2]
            occluded_feature[0][fhi+2][fwi-2] = 0.875*occluded_feature[0][fhi+2][fwi-2]
            occluded_feature[0][fhi+2][fwi+2] = 0.875*occluded_feature[0][fhi+2][fwi+2]
        # print(hi)
        # print(wi)
        # print(patch_size)
        drop = GetPossDecrease(original_feature, occluded_feature, int(cat))
        if drop != -1:
            myscore.append(drop)
    quad_img_vc_score.append(float(np.sum(myscore))/img_num)
...


DragFeature.js

Source: DragFeature.js (GitHub)


...
        }, this.dragCallbacks), {
            documentDrag: this.documentDrag
        }
    ),
    feature: new OpenLayers.Handler.Feature(
        this, this.layer, OpenLayers.Util.extend({
            // 'click' and 'clickout' callbacks are for mobile
            // support: no 'over' or 'out' in touch-based browsers.
            click: this.clickFeature,
            clickout: this.clickoutFeature,
            over: this.overFeature,
            out: this.outFeature
        }, this.featureCallbacks),
        {geometryTypes: this.geometryTypes}
    )
};
},

/**
 * Method: clickFeature
 * Called when the feature handler detects a click-in on a feature.
 *
 * Parameters:
 * feature - {<OpenLayers.Feature.Vector>}
 */
clickFeature: function(feature) {
    if (this.handlers.feature.touch && !this.over && this.overFeature(feature)) {
        this.handlers.drag.dragstart(this.handlers.feature.evt);
        // let the events propagate to the feature handler (click callback)
        this.handlers.drag.stopDown = false;
    }
},

/**
 * Method: clickoutFeature
 * Called when the feature handler detects a click-out on a feature.
 *
 * Parameters:
 * feature - {<OpenLayers.Feature.Vector>}
 */
clickoutFeature: function(feature) {
    if (this.handlers.feature.touch && this.over) {
        this.outFeature(feature);
        this.handlers.drag.stopDown = true;
    }
},

/**
 * APIMethod: destroy
 * Take care of things that are not handled in the superclass.
 */
destroy: function() {
    this.layer = null;
    OpenLayers.Control.prototype.destroy.apply(this, []);
},

/**
 * APIMethod: activate
 * Activate the control and the feature handler.
 *
 * Returns:
 * {Boolean} Successfully activated the control and feature handler.
 */
activate: function() {
    return (this.handlers.feature.activate() &&
            OpenLayers.Control.prototype.activate.apply(this, arguments));
},

/**
 * APIMethod: deactivate
 * Deactivate the control and all handlers.
 *
 * Returns:
 * {Boolean} Successfully deactivated the control.
 */
deactivate: function() {
    // the return from the handlers is unimportant in this case
    this.handlers.drag.deactivate();
    this.handlers.feature.deactivate();
    this.feature = null;
    this.dragging = false;
    this.lastPixel = null;
    OpenLayers.Element.removeClass(
        this.map.viewPortDiv, this.displayClass + "Over"
    );
    return OpenLayers.Control.prototype.deactivate.apply(this, arguments);
},

/**
 * Method: overFeature
 * Called when the feature handler detects a mouse-over on a feature.
 * This activates the drag handler.
 *
 * Parameters:
 * feature - {<OpenLayers.Feature.Vector>} The selected feature.
 *
 * Returns:
 * {Boolean} Successfully activated the drag handler.
 */
overFeature: function(feature) {
    var activated = false;
    if (!this.handlers.drag.dragging) {
        this.feature = feature;
        this.handlers.drag.activate();
        activated = true;
        this.over = true;
        OpenLayers.Element.addClass(
            this.map.viewPortDiv, this.displayClass + "Over"
        );
        this.onEnter(feature);
    } else {
        this.over = (this.feature.id == feature.id);
    }
    return activated;
},

/**
 * Method: downFeature
 * Called when the drag handler detects a mouse-down.
 *
 * Parameters:
 * pixel - {<OpenLayers.Pixel>} Location of the mouse event.
 */
downFeature: function(pixel) {
    this.lastPixel = pixel;
    this.onStart(this.feature, pixel);
},

/**
 * Method: moveFeature
 * Called when the drag handler detects a mouse-move. Also calls the
 * optional onDrag method.
 *
 * Parameters:
 * pixel - {<OpenLayers.Pixel>} Location of the mouse event.
 */
moveFeature: function(pixel) {
    var res = this.map.getResolution();
    this.feature.geometry.move(res * (pixel.x - this.lastPixel.x),
                               res * (this.lastPixel.y - pixel.y));
    this.layer.drawFeature(this.feature);
    this.lastPixel = pixel;
    this.onDrag(this.feature, pixel);
},

/**
 * Method: upFeature
 * Called when the drag handler detects a mouse-up.
 *
 * Parameters:
 * pixel - {<OpenLayers.Pixel>} Location of the mouse event.
 */
upFeature: function(pixel) {
    if (!this.over) {
        this.handlers.drag.deactivate();
    }
...
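
The excerpt above is from OpenLayers' DragFeature control, which pairs a feature handler (over/out/click) with a drag handler (down/move/up). As a rough usage sketch, assuming the OpenLayers 2.x API and a hypothetical vector layer, the control could be wired up like this:

// Minimal sketch, assuming OpenLayers 2.x is loaded globally and a
// map container element with id 'map' exists (both assumptions).
var map = new OpenLayers.Map('map');
var vectors = new OpenLayers.Layer.Vector('Editable');
map.addLayer(vectors);

// The onStart/onDrag/onComplete hooks correspond to the
// downFeature/moveFeature/upFeature methods shown above.
var drag = new OpenLayers.Control.DragFeature(vectors, {
    onStart: function(feature, pixel) { console.log('drag start', feature.id); },
    onDrag: function(feature, pixel) { console.log('dragging', feature.id); },
    onComplete: function(feature, pixel) { console.log('drag done', feature.id); }
});
map.addControl(drag);
drag.activate();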


ToolboxView.js

Source:ToolboxView.js Github


...
            var Feature = featureManager.get(featureName);
            if (!Feature) {
                return;
            }
            feature = new Feature(featureModel);
        }
        features[featureName] = feature;
    }
    else {
        feature = features[oldName];
        // If feature does not exist.
        if (!feature) {
            return;
        }
        feature.model = featureModel;
    }
    if (!featureName && oldName) {
        feature.dispose && feature.dispose(ecModel, api);
        return;
...
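
This ToolboxView excerpt implements a create-or-update registry: a feature class is looked up by name, instantiated on first use, re-bound to a new model on update, and disposed when removed. Here is a minimal sketch of that pattern with a hypothetical featureManager (the names are assumptions for illustration, not the component's actual API):

// Hypothetical registry mapping feature names to constructor functions.
var featureManager = {
    _ctors: {},
    register: function(name, Ctor) { this._ctors[name] = Ctor; },
    get: function(name) { return this._ctors[name]; }
};

// Create-or-update, mirroring the excerpt above.
function updateFeature(features, featureName, featureModel) {
    var feature = features[featureName];
    if (!feature) {
        var Feature = featureManager.get(featureName);
        if (!Feature) {
            return; // unknown feature name: nothing to do
        }
        feature = features[featureName] = new Feature(featureModel);
    } else {
        feature.model = featureModel; // reuse the instance, swap the model
    }
    return feature;
}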


Using AI Code Generation


var root = require('root');

var f = new root.Feature();


Using AI Code Generation


var root = require('root');

root.Feature('test', function() {
    this.Given(/^I am on the homepage$/, function(callback) {
        callback.pending();
    });
    this.When(/^I search for "([^"]*)"$/, function(arg1, callback) {
        callback.pending();
    });
    this.Then(/^I should see "([^"]*)"$/, function(arg1, callback) {
        callback.pending();
    });
});
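
The Given/When/Then callbacks follow the Cucumber-style step-definition protocol: each step registers a regular expression plus a handler, and callback.pending() marks a step as not yet implemented. As an entirely hypothetical sketch (the real 'root' package may differ), a Feature registrar of this shape could be backed by a simple step table:

// Hypothetical step registry behind a Feature(name, body) API.
var steps = [];

function defineStep(pattern, handler) {
    steps.push({ pattern: pattern, handler: handler });
}

function Feature(name, body) {
    // Expose Given/When/Then on `this` inside the feature body,
    // matching how the snippet above is written.
    body.call({ Given: defineStep, When: defineStep, Then: defineStep });
}

// Find and run the step matching a line of a scenario; capture groups
// are passed to the handler ahead of the runner-supplied callback.
function runStep(text, callback) {
    for (var i = 0; i < steps.length; i++) {
        var m = steps[i].pattern.exec(text);
        if (m) {
            return steps[i].handler.apply(null, m.slice(1).concat(callback));
        }
    }
    callback(new Error('undefined step: ' + text));
}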


Using AI Code Generation


var feature = require('feature');

feature.Feature("test", function() {
});


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test through following best practices and diving into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run root automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

