Best Python code snippet using lisa_python
vgg16_top_tf_keras.py
Source: vgg16_top_tf_keras.py
...
      summaries_op = tf.summary.merge_all()
      #summary_writer.add_graph(sess.graph)
      sess.run(init)

      #dump_variables()

      # note that it is necessary to start with a fully-trained
      # classifier, including the top classifier,
      # in order to successfully do fine-tuning

      # prepare data augmentation configuration
      train_datagen = ImageDataGenerator(
          rescale=1. / 255,
          shear_range=0.2,
          zoom_range=0.2,
          horizontal_flip=True)

      test_datagen = ImageDataGenerator(rescale=1. / 255)
      ...
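The snippet above stops right after the two ImageDataGenerator instances are created. As a minimal sketch (not part of the file above) of how such generators are typically connected to folders of images via Keras' flow_from_directory, the directory paths, target size, and batch size below are illustrative assumptions, not values from the original script.

# Illustrative sketch only: feeding the generators defined above from
# hypothetical directories with one sub-folder per class. The 224x224
# target size matches VGG16's default input resolution.
train_generator = train_datagen.flow_from_directory(
    'data/train',
    target_size=(224, 224),
    batch_size=32,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    'data/validation',
    target_size=(224, 224),
    batch_size=32,
    class_mode='binary')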
trainer.py
Source: trainer.py
import chainer
from chainer import cuda, serializers
from chainer import computational_graph as cg
import numpy as np
import tqdm
import sys
import copy
import warnings
from deepnet.core import config
from deepnet import utils
import os.path
import subprocess
import gc
from time import sleep
import corenet


class Trainer:
    def __init__(self,
                 network, train_iter, valid_iter,
                 visualizers, train_config, optimizer,
                 logger, archive_dir, archive_nodes,
                 postprocessor, redirect, architecture_loss,
                 ):
        config.set_global_config('main_network', network)
        self.network = network
        self.train_config = train_config
        self.n_max_train_iter = train_config['n_max_train_iter']
        self.n_max_valid_iter = train_config['n_max_valid_iter'] if train_config['n_max_valid_iter'] is not None else len(
            valid_iter.dataset)
        self.n_valid_step = train_config['n_valid_step']
        self.progress_vars = train_config['progress_vars']
        self.train_iter = train_iter
        self.valid_iter = valid_iter
        self.archive_dir = archive_dir
        self.archive_nodes = archive_nodes
        self.visualizers = visualizers
        self.postprocessor = postprocessor
        self.optimizer = optimizer
        for key, optimizer in self.optimizer.items():
            corenet.ChainerNode.add_updater(key, optimizer)
        self.logger = logger
        self.dump_variables = []
        self.redirect = redirect
        self.architecture_loss = architecture_loss
        for l in self.logger:
            for var_name in l.dump_variables:
                pos = var_name.find('.')
                if pos == -1:
                    self.dump_variables.append(var_name)
                else:
                    self.dump_variables.append(var_name[pos + 1:])
        self.dump_variables = list(set(self.dump_variables))

    def train(self):
        with tqdm.tqdm(total=self.n_max_train_iter) as pbar:
            for i, batch in enumerate(self.train_iter):
                self.train_iteration = i
                variables = {}
                variables['__iteration__'] = i
                variables['__train_iteration__'] = self.train_iteration
                input_vars = self.batch_to_vars(batch)
                # Inference on the current batch.
                for stage_input in input_vars:
                    self.inference(stage_input, is_train=True)
                sleep(1e-3)
                # Back propagation and network update
                self.network.update()
                # Update variables.
                variables.update(self.network.variables)
                self.network.variables.clear()
                # Save network architecture
                if self.train_iteration == 0:
                    self.write_network_architecture(
                        self.architecture_loss[0],
                        variables[self.architecture_loss[1]]
                    )
                # Update variables and unwrap chainer variables
                for var_name, value in variables.items():
                    variables[var_name] = utils.unwrapped(value)
                variables.update({'train.' + name: utils.unwrapped(value)
                                  for name, value in variables.items()})
                # Run validation when the current iteration is a multiple of n_valid_step
                valid_keys = []
                if i % self.n_valid_step == 0:
                    valid_variables = self.validate(variables=variables)
                    variables.update(
                        {'valid.' + name: value for name, value in valid_variables.items()})
                    self.network.variables.clear()
                    del valid_variables
                # Write log
                for logger in self.logger:
                    logger(variables, is_valid=False)
                # Update progress bar
                self.print_description(pbar, variables)
                pbar.update()
                if self.n_max_train_iter <= i:
                    break
                # Refresh variables
                variables.clear()
                gc.collect()

    def validate(self, variables):
        valid_variables = dict()
        with tqdm.tqdm(total=self.n_max_valid_iter) as pbar, \
                chainer.no_backprop_mode():
            self.valid_iter.reset()
            for i, batch in enumerate(self.valid_iter):
                sleep(1e-3)
                self.valid_iteration = i
                variables['__iteration__'] = i
                variables['__valid_iteration__'] = self.valid_iteration
                input_vars = self.batch_to_vars(batch)
                # Inference
                for j, stage_input in enumerate(input_vars):
                    self.inference(stage_input, is_train=False)
                    variables['__stage__'] = j
                    variables.update(self.network.variables)
                for visualizer in self.visualizers:
                    visualizer(variables)
                # Accumulate variables
                for var_name in self.dump_variables:
                    var = variables[var_name]
                    if var_name not in valid_variables:  # Initialize variable
                        if isinstance(var, chainer.Variable):
                            valid_variables[var_name] = chainer.functions.copy(
                                var, -1)
                        else:
                            valid_variables[var_name] = var
                    else:
                        if isinstance(var, chainer.Variable):
                            valid_variables[var_name] += chainer.functions.copy(
                                var, -1)
                # Post processing
                self.postprocessor(variables, 'valid', True)
                pbar.update(self.valid_iter.batch_size)
                if self.n_max_valid_iter <= (i + 1) * self.valid_iter.batch_size:
                    break
            pbar.close()
        for node_name in self.archive_nodes:
            try:
                serializers.save_npz(
                    os.path.join(self.archive_dir, node_name +
                                 '_{:08d}.npz'.format(variables['__train_iteration__'])),
                    self.network.get_node(node_name).model
                )
            except KeyError:
                raise KeyError('Failed to save npz file: ' + node_name)
        # Post processing
        self.postprocessor(variables, 'valid', False)
        # Compute mean of accumulated variables
        for var_name in self.dump_variables:
            var = valid_variables[var_name]
            denom = float(self.n_max_valid_iter) / self.valid_iter.batch_size
            if isinstance(var, chainer.Variable):
                if self.train_config['gpu'][0] >= 0:
                    valid_variables[var_name] = float(
                        chainer.cuda.to_cpu((var / denom).data)
                    )
                else:
                    valid_variables[var_name] = float((var / denom).data)
        # Save visualized results
        for visualizer in self.visualizers:
            visualizer(variables)
            visualizer.save()
            visualizer.clear()
        return valid_variables

    def print_description(self, pbar, variables):
        disp_vars = {}
        display_var_formats = []
        for var_format in self.progress_vars:
            # Split the format spec into the variable name and its format
            var_name = ''
            pos = var_format.find(':')
            if pos == -1:
                var_name = var_format
            else:
                var_name = var_format[:pos]
            # Cast variable
            var = variables[var_name]
            display_var_formats.append(var_name + '=' + '{' + var_format + '}')
            if isinstance(var, chainer.Variable):
                value = None
                if self.train_config['gpu'][0] >= 0:
                    value = chainer.cuda.to_cpu(var.data)
                else:
                    value = var.data
                if isinstance(value, np.ndarray):
                    if value.ndim == 0 or value.size == 1:
                        disp_vars[var_name] = float(value)
                    else:
                        disp_vars[var_name] = value.tolist()
            elif isinstance(var, np.ndarray):
                if var.ndim == 0 or var.size == 1:
                    disp_vars[var_name] = float(var)
                else:
                    disp_vars[var_name] = var.tolist()
            else:
                disp_vars[var_name] = var
        display_format = 'train[' + ','.join(display_var_formats) + ']'
        pbar.set_description(display_format.format(
            **disp_vars, __iteration__=variables['__iteration__']))

    def batch_to_vars(self, batch):
        # Regroup the batch into one dict of lists per stage
        input_vars = [dict() for elem in batch[0]]
        for elem in batch:                          # loop over samples in the batch
            for i, stage_input in enumerate(elem):  # loop over stage inputs
                for name, input_ in stage_input.items():
                    input_vars[i].setdefault(name, []).append(input_)
        return input_vars

    def inference(self, stage_input, is_train=False):
        for key, value in list(stage_input.items()):
            if key not in self.redirect:
                continue
            stage_input[self.redirect[key]] = value
        self.network(mode='train' if is_train else 'valid', **stage_input)

    def write_network_architecture(self, graph_filename, loss):
        with open(graph_filename, 'w+') as o:
            o.write(cg.build_computational_graph((loss, )).dump())
        try:
            subprocess.call('dot -T png {} -o {}'.format(graph_filename,
                                                         graph_filename.replace('.dot', '.png')),
                            shell=True)
        except:
            warnings.warn('please install graphviz and set your environment.')
        try:
            subprocess.call('dot -T svg {} -o {}'.format(graph_filename,
                                                         graph_filename.replace('.dot', '.svg')),
                            shell=True)
        except:
            ...
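To make the data flow in Trainer.train easier to follow, here is a small illustration (not part of trainer.py) of what batch_to_vars does: each sample in a mini-batch is a list of per-stage dicts, and the method regroups them into one dict per stage whose values are lists over the batch. The field names and values below are invented for the example.

# Hypothetical two-sample batch, each sample with a single stage.
batch = [
    [{'image': 'img_0', 'label': 0}],   # sample 0
    [{'image': 'img_1', 'label': 1}],   # sample 1
]
# Trainer.batch_to_vars(batch) would return one dict per stage:
# [{'image': ['img_0', 'img_1'], 'label': [0, 1]}]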
__init__.py
Source: __init__.py
from floxcore.command import Stage
from floxcore.context import Flox
from floxcore.plugin import Plugin
from flox_sentry.configure import SentryConfiguration
from flox_sentry.project import create_team, create_project, assing_teams, dump_variables


class SentryPlugin(Plugin):
    def configuration(self):
        return SentryConfiguration()

    def handle_variables(self, flox: Flox):
        return (
            Stage(dump_variables, 1900),
        )

    def handle_project(self, flox: Flox):
        return [
            Stage(create_team, require=["sentry.create_team"]),
            Stage(create_project, 1900),
            Stage(dump_variables, 1900),
            Stage(assing_teams),
        ]

    def configured(self, flox) -> bool:
        return all([flox.settings.sentry.default_team, flox.settings.sentry.organization])


def plugin():
    ...
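As a rough sketch of the same pattern, a hypothetical plugin could register its own stages the way SentryPlugin does above; the callables and class below are placeholders, and nothing is implied about floxcore's Stage semantics beyond what the snippet shows.

# Hypothetical skeleton mirroring the SentryPlugin pattern above; the
# stage callables are placeholders, not part of flox_sentry or floxcore.
from floxcore.command import Stage
from floxcore.plugin import Plugin


def create_resource(*args, **kwargs):
    """Placeholder stage callable."""


def dump_resource_variables(*args, **kwargs):
    """Placeholder stage callable."""


class ExamplePlugin(Plugin):
    def handle_project(self, flox):
        return [
            Stage(create_resource),
            Stage(dump_resource_variables, 1900),
        ]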