How to use _output_summary method in stestr

Best Python code snippet using stestr_python

utils_nn.py

Source:utils_nn.py Github

copy

Full Screen

# NOTE(review): the scraped snippet begins mid-function; two tf.summary.histogram
# calls (presumably the tail of a `_parameter_summary` helper — its `def` line is
# not visible) are omitted here rather than guessed at.


def _image_summary(params):
    """Log `params` as a single image summary under its op name."""
    tf.summary.image(params.op.name, params, max_outputs=1)


def _output_summary(outputs):
    """Log a histogram of layer outputs plus a scalar sparsity summary."""
    tf.summary.histogram(outputs.op.name + '/outputs', outputs)
    tf.summary.scalar(outputs.op.name + '/outputs_sparsity',
                      tf.nn.zero_fraction(outputs))


def get_weight_placeholders():
    """Return (weights, placeholder, assign_op) tuples registered by the layers."""
    return tf.get_collection('weight_placeholder')


def get_mask_placeholders():
    """Return (mask, placeholder, assign_op) tuples registered by the layers."""
    return tf.get_collection('mask_placeholder')


#################################################################################
# LAYERS
#################################################################################
def fully_connected_layer(x, units, name, gpu, config):
    '''
    Build a fully connected layer, optionally with a pruning mask.

    Args:
        x: inputs from the previous layer, shape = [None, n_in].
        units: number of nodes of this layer, scalar.
        name: the name of this layer in the graph.
        gpu: device flag forwarded to the parameter initializers.
        config: configuration mapping (parsed yaml).
    Returns:
        h: outputs, shape = [None, units].
    '''
    n_in = x.get_shape().as_list()[1]
    with tf.variable_scope(name):
        # NOTE(review): stddev uses fan-out (`units`); He initialization normally
        # uses fan-in — confirm this is intentional.
        weights = _weights_initializer(name='weights',
                                       shape=[n_in, units],
                                       stddev=np.sqrt(2 / units),
                                       gpu=gpu,
                                       config=config)
        biases = _biases_initializer(name='biases',
                                     shape=[units],
                                     val=0.1,
                                     gpu=gpu,
                                     config=config)
        # Initialize and apply the mask if configured to.
        if config['use_mask']:
            mask = _mask_initializer(name='mask',
                                     shape=weights.get_shape().as_list(),
                                     gpu=gpu)
            masked_weights = tf.multiply(mask, weights, name='masked_weights')
            _parameter_summary(masked_weights)
            # Reshape [n_in, units] -> [1, n_in, units, 1] so the weight matrix
            # can be logged as an image summary.
            image_shape = [1] + masked_weights.get_shape().as_list() + [1]
            _image_summary(tf.reshape(masked_weights, image_shape))
            h = tf.add(tf.matmul(x, masked_weights), biases, name=name)
        else:
            h = tf.add(tf.matmul(x, weights), biases, name=name)
        # Add summaries for the parameters and outputs.
        _parameter_summary(weights)
        # FIX: the original called _parameter_summary(mask) unconditionally,
        # which raises NameError when config['use_mask'] is False.
        if config['use_mask']:
            _parameter_summary(mask)
        _output_summary(h)
        # FIX: use short-circuit `or` instead of bitwise `|` on booleans.
        if config['use_owl'] or config['use_growl'] or config['use_group_lasso']:
            # Create the feed/assign interface used by the OWL/GrOWL/group-lasso
            # proximal update step.
            w_placeholder = tf.placeholder(tf.float32, [n_in, units])
            assign_op_w = tf.assign(weights, w_placeholder, validate_shape=True)
            tf.add_to_collection('weight_placeholder',
                                 (weights, w_placeholder, assign_op_w))
            # FIX: the mask interface also referenced `mask` unconditionally;
            # only register it when a mask actually exists.
            if config['use_mask']:
                mask_placeholder = tf.placeholder(tf.float32, [n_in, units])
                assign_op_m = tf.assign(mask, mask_placeholder, validate_shape=True)
                tf.add_to_collection('mask_placeholder',
                                     (mask, mask_placeholder, assign_op_m))
        return h


def relu_layer(x, name):
    '''
    Args:
        x: inputs from the previous layer.
    Returns:
        h: activation.
    '''
    with tf.variable_scope(name):
        h = tf.nn.relu(x, name=name)
        _output_summary(h)
        return h


def batch_normalization_layer(x, axis, phase, name):
    '''
    Args:
        x: inputs.
        phase: boolean, true for training, false for testing.
    Returns:
        h: results.
    '''
    h = tf.layers.batch_normalization(x, axis=axis, training=phase, name=name)
    # NOTE(review): the scraped snippet is truncated here; the trailing
    # `return h` is a reconstruction implied by the docstring — confirm
    # against the original file.
    return h

Full Screen

Full Screen

utils_nn-checkpoint.py

Source:utils_nn-checkpoint.py Github

copy

Full Screen

# NOTE(review): the scraped snippet begins mid-function; two tf.summary.histogram
# calls (presumably the tail of a `_parameter_summary` helper — its `def` line is
# not visible) are omitted here rather than guessed at.


def _image_summary(params):
    """Log `params` as a single image summary under its op name."""
    tf.summary.image(params.op.name, params, max_outputs=1)


def _output_summary(outputs):
    """Log a histogram of layer outputs plus a scalar sparsity summary."""
    tf.summary.histogram(outputs.op.name + '/outputs', outputs)
    tf.summary.scalar(outputs.op.name + '/outputs_sparsity',
                      tf.nn.zero_fraction(outputs))


def get_weight_placeholders():
    """Return (weights, placeholder, assign_op) tuples registered by the layers."""
    return tf.get_collection('weight_placeholder')


def get_mask_placeholders():
    """Return (mask, placeholder, assign_op) tuples registered by the layers."""
    return tf.get_collection('mask_placeholder')


#################################################################################
# LAYERS
#################################################################################
def fully_connected_layer(x, units, name, gpu, config):
    '''
    Build a fully connected layer, optionally with a pruning mask.

    Args:
        x: inputs from the previous layer, shape = [None, n_in].
        units: number of nodes of this layer, scalar.
        name: the name of this layer in the graph.
        gpu: device flag forwarded to the parameter initializers.
        config: configuration mapping (parsed yaml).
    Returns:
        h: outputs, shape = [None, units].
    '''
    n_in = x.get_shape().as_list()[1]
    with tf.variable_scope(name):
        # NOTE(review): stddev uses fan-out (`units`); He initialization normally
        # uses fan-in — confirm this is intentional.
        weights = _weights_initializer(name='weights',
                                       shape=[n_in, units],
                                       stddev=np.sqrt(2 / units),
                                       gpu=gpu,
                                       config=config)
        biases = _biases_initializer(name='biases',
                                     shape=[units],
                                     val=0.1,
                                     gpu=gpu,
                                     config=config)
        # Initialize and apply the mask if configured to.
        if config['use_mask']:
            mask = _mask_initializer(name='mask',
                                     shape=weights.get_shape().as_list(),
                                     gpu=gpu)
            masked_weights = tf.multiply(mask, weights, name='masked_weights')
            _parameter_summary(masked_weights)
            # Reshape [n_in, units] -> [1, n_in, units, 1] so the weight matrix
            # can be logged as an image summary.
            image_shape = [1] + masked_weights.get_shape().as_list() + [1]
            _image_summary(tf.reshape(masked_weights, image_shape))
            h = tf.add(tf.matmul(x, masked_weights), biases, name=name)
        else:
            h = tf.add(tf.matmul(x, weights), biases, name=name)
        # Add summaries for the parameters and outputs.
        _parameter_summary(weights)
        # FIX: the original called _parameter_summary(mask) unconditionally,
        # which raises NameError when config['use_mask'] is False.
        if config['use_mask']:
            _parameter_summary(mask)
        _output_summary(h)
        # FIX: use short-circuit `or` instead of bitwise `|` on booleans.
        if config['use_owl'] or config['use_growl'] or config['use_group_lasso']:
            # Create the feed/assign interface used by the OWL/GrOWL/group-lasso
            # proximal update step.
            w_placeholder = tf.placeholder(tf.float32, [n_in, units])
            assign_op_w = tf.assign(weights, w_placeholder, validate_shape=True)
            tf.add_to_collection('weight_placeholder',
                                 (weights, w_placeholder, assign_op_w))
            # FIX: the mask interface also referenced `mask` unconditionally;
            # only register it when a mask actually exists.
            if config['use_mask']:
                mask_placeholder = tf.placeholder(tf.float32, [n_in, units])
                assign_op_m = tf.assign(mask, mask_placeholder, validate_shape=True)
                tf.add_to_collection('mask_placeholder',
                                     (mask, mask_placeholder, assign_op_m))
        return h


def relu_layer(x, name):
    '''
    Args:
        x: inputs from the previous layer.
    Returns:
        h: activation.
    '''
    with tf.variable_scope(name):
        h = tf.nn.relu(x, name=name)
        _output_summary(h)
        return h


def batch_normalization_layer(x, axis, phase, name):
    '''
    Args:
        x: inputs.
        phase: boolean, true for training, false for testing.
    Returns:
        h: results.
    '''
    h = tf.layers.batch_normalization(x, axis=axis, training=phase, name=name)
    # NOTE(review): the scraped snippet is truncated here; the trailing
    # `return h` is a reconstruction implied by the docstring — confirm
    # against the original file.
    return h

Full Screen

Full Screen

tiny_svr.py

Source:tiny_svr.py Github

copy

Full Screen

...92 return False93 def _reload(self):94 self.warn("reloading ...")95 self._clear_timer()96 self._output_summary()97 self._handler.close()98 reload(svr_util)99 reload(svr_conf)100 self.on_reload()101 self._start()102 self._is_reload = False103 def _close(self):104 self.warn("close ...")105 self._clear_timer()106 self._output_summary()107 self._is_run = False108 self._handler.close()109 def _start(self):110 self._conf = svr_conf.CONF111 self._get_logger()112 self._handler = self.get_handler()113 self._handler.start()114 self._init_timer()115 self.on_start()116 def _signal(self, sig, _):117 self.warn('signal %s' % sig)118 if sig in (signal.SIGINT, signal.SIGTERM):119 self._is_close = True120 elif sig == signal.SIGUSR1:121 self._is_reload = True122 def _get_logger(self):123 self._logger = svr_util.get_logger(self.get_name(), self._conf)124 self.info = self._logger.info125 self.warn = self._logger.warn126 self.error = self._logger.error127 def _states_check(self):128 if not self._last_report_time:129 return130 report_time = self._conf['svr.timer.run_status_check_time_span']131 delta_time = time.time() - self._last_report_time132 if delta_time < report_time:133 return134 self.error('run_states_check: not receive report in %s s, start auto-exit!' 
% report_time)135 # close safely136 os.kill(os.getpid(), signal.SIGINT)137 # if can't close safely force_exit138 delay = self._conf['svr.close.force_close_delay']139 threading.Timer(interval=delay, function=svr_util.force_exit, args=[-3]).start()140 def _output_summary(self):141 self.warn('<SUMMARY> %s' % self._handler.get_summary())142 def _clear_timer(self):143 self._timer.clear()144 self._thread_timer.clear()145 def _init_timer(self):146 self._timer = TinyTimerObserver(self._logger)147 self._thread_timer = TinyThreadTimerObserver(self._logger, self._conf['svr.timer.min_span'])148 check_timer = PeriodTimer(self._states_check, self._conf['svr.timer.run_status_check_time_span'])149 summary_timer = FixedPeriodTimer(self._output_summary, self._conf['svr.timer.summary_output_time_point'])150 self._thread_timer.add('check_timer', check_timer)151 self._thread_timer.add('summary_timer', summary_timer)...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you be proficient with different test automation frameworks, e.g., Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run stestr automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing for FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful