Best Python code snippet using avocado_python
Learning.py
Source: Learning.py
...
        ### forward
        ###
        if self.model.type() == 's_sc':
          batch_src, batch_tgt = batch_idxs[0], batch_idxs[1]
          src, msk_src = prepare_source(batch_src, self.idx_pad, device)
          tgt, ref, msk_tgt = prepare_target(batch_tgt, self.idx_pad, device)
          pred = self.model.forward(src, tgt, msk_src, msk_tgt) #no log_softmax is applied
        elif self.model.type() == 's_s_scc_scc':
          batch_src, batch_tgt, batch_xsrc, batch_xtgt = batch_idxs[0], batch_idxs[1], batch_idxs[2], batch_idxs[3]
          src, msk_src = prepare_source(batch_src, self.idx_pad, device)
          tgt, ref, msk_tgt = prepare_target(batch_tgt, self.idx_pad, device)
          xsrc, msk_xsrc = prepare_source(batch_xsrc, self.idx_pad, device)
          xtgt, msk_xtgt = prepare_source(batch_xtgt, self.idx_pad, device)
          pred_msk, pred = self.model.forward(src, xsrc, xtgt, tgt, msk_src, msk_xsrc, msk_xtgt, msk_tgt) #no log_softmax is applied
        elif self.model.type() == '2nmt_2c':
          batch_src, batch_tgt, batch_xsrc, batch_xtgt, batch_hide_xtgt = batch_idxs[0], batch_idxs[1], batch_idxs[2], batch_idxs[3], batch_idxs[4]
          src, msk_src = prepare_source(batch_src, self.idx_pad, device)
          tgt, ref, msk_tgt = prepare_target(batch_tgt, self.idx_pad, device)
          _, msk_tgt_cross = prepare_source_cross(batch_tgt, self.idx_pad, device)
          hide_xtgt, msk_hide_xtgt = prepare_source(batch_hide_xtgt, self.idx_pad, device)
          xsrc, msk_xsrc = prepare_source(batch_xsrc, self.idx_pad, device)
          xtgt, msk_xtgt = prepare_source(batch_xtgt, self.idx_pad, device)
          pred_hide, pred = self.model.forward(src, xsrc, xtgt, tgt, msk_src, msk_xsrc, msk_xtgt, msk_tgt, msk_tgt_cross) #no log_softmax is applied
        elif self.model.type() == 'sxs_sc':
          batch_src, batch_tgt, batch_xtgt = batch_idxs[0], batch_idxs[1], batch_idxs[2]
          src, msk_src = prepare_source(batch_src, self.idx_pad, device)
          tgt, ref, msk_tgt = prepare_target(batch_tgt, self.idx_pad, device)
          xtgt, msk_xtgt = prepare_source(batch_xtgt, self.idx_pad, device)
          pred = self.model.forward(src, xtgt, tgt, msk_src, msk_xtgt, msk_tgt) #no log_softmax is applied
        elif self.model.type() == 'sxsc_sc':
          batch_src, batch_tgt, batch_xtgt = batch_idxs[0], batch_idxs[1], batch_idxs[2]
          src, msk_src = prepare_source(batch_src, self.idx_pad, device)
          tgt, ref, msk_tgt = prepare_target(batch_tgt, self.idx_pad, device)
          xtgt, msk_xtgt = prepare_source(batch_xtgt, self.idx_pad, device)
          pred = self.model.forward(src, xtgt, tgt, msk_src, msk_xtgt, msk_tgt) #no log_softmax is applied
        elif self.model.type() == 's_s_scc':
          batch_src, batch_tgt, batch_xtgt = batch_idxs[0], batch_idxs[1], batch_idxs[2]
          src, msk_src = prepare_source(batch_src, self.idx_pad, device)
          tgt, ref, msk_tgt = prepare_target(batch_tgt, self.idx_pad, device)
          xtgt, msk_xtgt = prepare_source(batch_xtgt, self.idx_pad, device)
          pred = self.model.forward(src, xtgt, tgt, msk_src, msk_xtgt, msk_tgt) #no log_softmax is applied
        ###
        ### compute loss
        ###
        ntok_in_batch = torch.sum(ref != self.idx_pad)
        ntok_in_step += ntok_in_batch
        loss_trns = self.criter(pred, ref) / ntok_in_batch / self.accum_n_batchs #sum of losses in batch, normalized by tokens in batch and by the accum_n_batchs batchs accumulated before each model update
        if self.model.type() == '2nmt_2c':
          alpha = 0.5
          ntokhide_in_batch = torch.sum(hide_xtgt != self.idx_pad)
          loss_hide = self.criter(pred_hide, hide_xtgt) / ntokhide_in_batch / self.accum_n_batchs
          loss = (alpha * loss_trns) + ((1.0-alpha) * loss_hide) #weighted sum of translation and hide losses
        else:
          loss = loss_trns
        loss_accum += loss.item()
        ###
        ### compute/accumulate gradients (gradients accumulate until step() is called)
        ###
        loss.backward()
        if n_batch % self.accum_n_batchs == 0:
          ###
          ### optimize (update model)
          ###
          if self.clip > 0.0: ### clip gradients norm
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
          self.optScheduler.step() ### updates model parameters after incrementing step and updating lr
          self.optScheduler.optimizer.zero_grad() ### sets gradients to zero for next update
          ### add score
          score.step(loss_accum, ntok_in_step)
          ntok_in_step = 0
          loss_accum = 0.0
          ###
          ### report
          ###
          if self.report_every and self.optScheduler._step and self.optScheduler._step % self.report_every == 0: ### first _step is 0
            loss_per_tok, steps_per_sec = score.report()
            logging.info('Learning step: {} epoch: {} batch: {} steps/sec: {:.2f} lr: {:.6f} Loss: {:.3f}'.format(self.optScheduler._step, n_epoch, n_batch, steps_per_sec, self.optScheduler._rate, loss_per_tok))
            score = Score()
            if tensorboard:
              self.writer.add_scalar('Loss/train', loss_per_tok, self.optScheduler._step) #loss_accum was already reset above, so log the per-token loss
              self.writer.add_scalar('LearningRate', self.optScheduler._rate, self.optScheduler._step)
          ###
          ### validate
          ###
          if self.validate_every and self.optScheduler._step and self.optScheduler._step % self.validate_every == 0:
            if validset is not None:
              vloss = self.validate(validset, device)
          ###
          ### save
          ###
          if self.save_every and self.optScheduler._step and self.optScheduler._step % self.save_every == 0:
            save_checkpoint(self.suffix, self.model, self.optScheduler.optimizer, self.optScheduler._step, self.keep_last_n)
          ###
          ### stop by max_steps
          ###
          if self.max_steps and self.optScheduler._step and self.optScheduler._step >= self.max_steps:
            if validset is not None:
              vloss = self.validate(validset, device)
            save_checkpoint(self.suffix, self.model, self.optScheduler.optimizer, self.optScheduler._step, self.keep_last_n)
            logging.info('Learning STOP by [steps={}]'.format(self.optScheduler._step))
            return
      ###
      ### stop by max_epochs
      ###
      if self.max_epochs and n_epoch >= self.max_epochs:
        if validset is not None:
          vloss = self.validate(validset, device)
        save_checkpoint(self.suffix, self.model, self.optScheduler.optimizer, self.optScheduler._step, self.keep_last_n)
        logging.info('Learning STOP by [epochs={}]'.format(n_epoch))
        return

  def validate(self, validset, device):
    tic = time.time()
    valid_loss = 0.0
    n_batch = 0
    with torch.no_grad():
      self.model.eval()
      for batch_pos, batch_idxs in validset:
        if self.model.type() == 's_sc':
          batch_src, batch_tgt = batch_idxs[0], batch_idxs[1]
          src, msk_src = prepare_source(batch_src, self.idx_pad, device)
          tgt, ref, msk_tgt = prepare_target(batch_tgt, self.idx_pad, device)
          pred = self.model.forward(src, tgt, msk_src, msk_tgt) #no log_softmax is applied
        elif self.model.type() == 's_s_scc_scc':
          batch_src, batch_tgt, batch_xsrc, batch_xtgt = batch_idxs[0], batch_idxs[1], batch_idxs[2], batch_idxs[3]
          src, msk_src = prepare_source(batch_src, self.idx_pad, device)
          tgt, ref, msk_tgt = prepare_target(batch_tgt, self.idx_pad, device)
          xsrc, msk_xsrc = prepare_source(batch_xsrc, self.idx_pad, device)
          xtgt, msk_xtgt = prepare_source(batch_xtgt, self.idx_pad, device)
          pred_msk, pred = self.model.forward(src, xsrc, xtgt, tgt, msk_src, msk_xsrc, msk_xtgt, msk_tgt) #no log_softmax is applied
        elif self.model.type() == '2nmt_2c':
          batch_src, batch_tgt, batch_xsrc, batch_xtgt = batch_idxs[0], batch_idxs[1], batch_idxs[2], batch_idxs[3]
          src, msk_src = prepare_source(batch_src, self.idx_pad, device)
          tgt, ref, msk_tgt = prepare_target(batch_tgt, self.idx_pad, device)
          _, msk_tgt_cross = prepare_source_cross(batch_tgt, self.idx_pad, device)
          xsrc, msk_xsrc = prepare_source(batch_xsrc, self.idx_pad, device)
          xtgt, msk_xtgt = prepare_source(batch_xtgt, self.idx_pad, device)
          pred_hide, pred = self.model.forward(src, xsrc, xtgt, tgt, msk_src, msk_xsrc, msk_xtgt, msk_tgt, msk_tgt_cross) #no log_softmax is applied
        elif self.model.type() == 'sxs_sc':
          batch_src, batch_tgt, batch_xtgt = batch_idxs[0], batch_idxs[1], batch_idxs[2]
          src, msk_src = prepare_source(batch_src, self.idx_pad, device)
          tgt, ref, msk_tgt = prepare_target(batch_tgt, self.idx_pad, device)
          xtgt, msk_xtgt = prepare_source(batch_xtgt, self.idx_pad, device)
          pred = self.model.forward(src, xtgt, tgt, msk_src, msk_xtgt, msk_tgt) #no log_softmax is applied
        elif self.model.type() == 'sxsc_sc':
          batch_src, batch_tgt, batch_xtgt = batch_idxs[0], batch_idxs[1], batch_idxs[2]
          src, msk_src = prepare_source(batch_src, self.idx_pad, device)
          tgt, ref, msk_tgt = prepare_target(batch_tgt, self.idx_pad, device)
          xtgt, msk_xtgt = prepare_source(batch_xtgt, self.idx_pad, device)
          pred = self.model.forward(src, xtgt, tgt, msk_src, msk_xtgt, msk_tgt) #no log_softmax is applied
        elif self.model.type() == 's_s_scc':
          batch_src, batch_tgt, batch_xtgt = batch_idxs[0], batch_idxs[1], batch_idxs[2]
          src, msk_src = prepare_source(batch_src, self.idx_pad, device)
          tgt, ref, msk_tgt = prepare_target(batch_tgt, self.idx_pad, device)
          xtgt, msk_xtgt = prepare_source(batch_xtgt, self.idx_pad, device)
          pred = self.model.forward(src, xtgt, tgt, msk_src, msk_xtgt, msk_tgt) #no log_softmax is applied
        n_batch += 1
        loss = self.criter(pred, ref) ### batch loss
        valid_loss += loss.item() / torch.sum(ref != self.idx_pad).item() ### accumulate the per-token loss as a plain float
        if n_batch == 1:
          print_pos_src_tgt_hyp_ref(pred[0], batch_pos[0], src[0], tgt[0], ref[0])
    loss = 1.0*valid_loss/n_batch if n_batch else 0.0
    fref = validset.files[1]
    bleu = 0.0 #self.translate_valid(validset, fref)
    toc = time.time()
    logging.info('Validation step: {} #batchs: {} sec: {:.2f} bleu: {:.2f} loss: {:.3f}'.format(self.optScheduler._step, n_batch, toc-tic, bleu, loss))
    if tensorboard:
      self.writer.add_scalar('Loss/valid', loss, self.optScheduler._step)
    return loss
...
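
The prepare_source and prepare_target helpers are called throughout but their bodies are not part of this snippet. A minimal sketch of what they are assumed to do: pad variable-length index lists into batch tensors and build the padding (and, for targets, causal) masks consumed by model.forward. All names and shapes below are assumptions, not the file's actual implementation.

import torch

def subsequent_mask(size):
  ### causal (lower-triangular) mask so position i only attends to positions <= i
  return torch.tril(torch.ones((1, size, size), dtype=torch.bool))

def prepare_source(batch, idx_pad, device):
  ### pad variable-length index lists to [bs, ls] and build a [bs, 1, ls] pad mask
  src = torch.nn.utils.rnn.pad_sequence([torch.tensor(x) for x in batch], batch_first=True, padding_value=idx_pad).to(device)
  msk_src = (src != idx_pad).unsqueeze(-2)
  return src, msk_src

def prepare_target(batch, idx_pad, device):
  ### shift for teacher forcing: tgt feeds the decoder, ref is the expected output
  seq = torch.nn.utils.rnn.pad_sequence([torch.tensor(x) for x in batch], batch_first=True, padding_value=idx_pad).to(device)
  tgt, ref = seq[:, :-1], seq[:, 1:]
  msk_tgt = (tgt != idx_pad).unsqueeze(-2) & subsequent_mask(tgt.size(1)).to(device) #[bs, lt, lt] pad + causal mask
  return tgt, ref, msk_tgt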
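
The repeated "#no log_softmax is applied" comments mean model.forward returns raw logits, so self.criter must normalize internally. The real criterion is defined elsewhere in the project; a plausible stand-in (an assumption) is a summed cross-entropy that ignores padding, which fits the explicit per-token division loss_trns = self.criter(pred, ref) / ntok_in_batch seen above.

import torch

def make_criterion(idx_pad):
  ### CrossEntropyLoss applies log_softmax itself; reduction='sum' matches the
  ### training loop dividing by ntok_in_batch; padded positions are skipped
  ce = torch.nn.CrossEntropyLoss(ignore_index=idx_pad, reduction='sum')
  def criter(pred, ref):
    return ce(pred.reshape(-1, pred.size(-1)), ref.reshape(-1)) #[bs*lt, V] logits vs [bs*lt] refs
  return criter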
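
The training loop uses gradient accumulation: each batch loss is divided by accum_n_batchs, backward() runs every batch, but step()/zero_grad() run only every accum_n_batchs batches, simulating a larger effective batch. A self-contained toy version of the same pattern (model, data, and hyperparameters here are illustrative only):

import torch

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
accum_n_batchs, clip = 4, 1.0
batches = [(torch.randn(8, 4), torch.randint(0, 2, (8,))) for _ in range(8)]
for n_batch, (x, y) in enumerate(batches, start=1):
  loss = torch.nn.functional.cross_entropy(model(x), y) / accum_n_batchs #normalize so the accumulated gradient averages the 4 batches
  loss.backward() #gradients keep accumulating until zero_grad()
  if n_batch % accum_n_batchs == 0:
    torch.nn.utils.clip_grad_norm_(model.parameters(), clip) #clip gradient norm, as when self.clip > 0.0 above
    opt.step()
    opt.zero_grad()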
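
self.optScheduler exposes _step, _rate, and the wrapped optimizer, which matches the Noam warm-up wrapper popularized by The Annotated Transformer. Its actual definition is not shown in this snippet; the following is a minimal sketch under that assumption, not the file's real scheduler:

import torch

class NoamOpt:
  ### lr grows linearly for warmup steps, then decays proportionally to step**-0.5
  def __init__(self, model_size, factor, warmup, optimizer):
    self.optimizer = optimizer
    self.model_size, self.factor, self.warmup = model_size, factor, warmup
    self._step = 0
    self._rate = 0.0
  def step(self):
    self._step += 1
    self._rate = self.factor * self.model_size**-0.5 * min(self._step**-0.5, self._step * self.warmup**-1.5)
    for p in self.optimizer.param_groups:
      p['lr'] = self._rate
    self.optimizer.step()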
