How to use the detach method in Cucumber-gherkin

Best JavaScript code snippets using cucumber-gherkin

TestSuite_hotplug_mp.py

Source: TestSuite_hotplug_mp.py (GitHub)


...
            if flg_exist == 1:
                self.verify(dev in out, "Fail that don't have the device!")
            if flg_exist == 0:
                self.verify(dev not in out, "Fail that have the device!")

    def attach_detach(self, process="pri", is_dev=1, opt_plug="plugin", flg_loop=0, dev="0000:00:00.0"):
        """
        Attach or detach physical/virtual device from primary/secondary
        process.
        process: define primary or secondary process.
        is_dev: define physical device as 1, virtual device as 0.
        opt_plug: define plug options as below
                  plugin: plug in device
                  plugout: plug out device
                  hotplug: plug in then plug out device from primary or
                           secondary process
                  crossplug: plug in from primary process then plug out from
                             secondary process, or plug in from secondary
                             process then plug out from primary
        flg_loop: define loop test flag
        dev: define physical device PCI "0000:00:00.0" or virtual device
             "net_af_packet"
        """
        if opt_plug == "plugin":
            self.verify_devlist(dev, flg_exist=0)
            for i in range(test_loop):
                if process == "pri":
                    if is_dev == 0:
                        self.session_pri.send_expect(
                            "attach %s,iface=%s"
                            % (dev, self.intf0), "example>", 100)
                    else:
                        self.session_pri.send_expect(
                            "attach %s" % dev, "example>", 100)
                if process == "sec":
                    if is_dev == 0:
                        self.session_sec_1.send_expect(
                            "attach %s,iface=%s"
                            % (dev, self.intf0), "example>", 100)
                    else:
                        self.session_sec_1.send_expect(
                            "attach %s" % dev, "example>", 100)
                if flg_loop == 0:
                    break
            self.verify_devlist(dev, flg_exist=1)
        if opt_plug == "plugout":
            self.verify_devlist(dev, flg_exist=1)
            for i in range(test_loop):
                if process == "pri":
                    self.session_pri.send_expect(
                        "detach %s" % dev, "example>", 100)
                if process == "sec":
                    self.session_sec_1.send_expect(
                        "detach %s" % dev, "example>", 100)
                if flg_loop == 0:
                    break
            self.verify_devlist(dev, flg_exist=0)

    def attach_detach_dev(self, process="pri", opt_plug="plugin", flg_loop=0, dev="0000:00:00.0"):
        """
        Attach or detach physical device from primary/secondary process.
        """
        # Scan port status when example setup, list ports that have been
        # bound to pmd
        if opt_plug in ["plugin", "hotplug", "crossplug"]:
            self.multi_process_setup()
            self.dut.bind_interfaces_linux(self.drivername)
        elif opt_plug == "plugout":
            self.dut.bind_interfaces_linux(self.drivername)
            self.multi_process_setup()
        if opt_plug in ["plugin", "plugout"]:
            self.attach_detach(process, 1, opt_plug, flg_loop, dev)
        elif opt_plug in ["hotplug", "crossplug"]:
            for i in range(test_loop):
                self.attach_detach(process, 1, "plugin", flg_loop, dev)
                if opt_plug == "crossplug":
                    if process == "pri":
                        cross_proc = "sec"
                    elif process == "sec":
                        cross_proc = "pri"
                    self.attach_detach(cross_proc, 1, "plugout", flg_loop, dev)
                else:
                    self.attach_detach(process, 1, "plugout", flg_loop, dev)
        self.multi_process_quit()
        self.dut.bind_interfaces_linux(self.kdriver)

    def attach_detach_vdev(self, process="pri", opt_plug="plugin", flg_loop=0, dev="net_af_packet"):
        """
        Attach or detach virtual device from primary/secondary process.
        Check port interface is at link up status before hotplug test.
        If link not up, may have below error:
        rte_pmd_init_internals(): net_af_packet: ioctl failed (SIOCGIFINDEX)
        EAL: Driver cannot attach the device (net_af_packet)
        """
        self.dut.send_expect("ifconfig %s up" % self.intf0, "#")
        time.sleep(5)
        out = self.dut.send_expect("ethtool %s" % self.intf0, "#")
        self.verify("Link detected: yes" in out, "Wrong link status")
        self.multi_process_setup()
        for i in range(test_loop):
            self.attach_detach(process, 0, "plugin", flg_loop, dev)
            if opt_plug in ["plugout", "hotplug", "crossplug"]:
                if opt_plug == "crossplug":
                    if process == "pri":
                        cross_proc = "sec"
                    elif process == "sec":
                        cross_proc = "pri"
                    self.attach_detach(cross_proc, 0, "plugout", flg_loop, dev)
                else:
                    self.attach_detach(process, 0, "plugout", flg_loop, dev)
            if opt_plug == "plugin" or opt_plug == "plugout":
                break
        self.multi_process_quit()

    def test_attach_dev_primary(self):
        """
        Attach physical device from primary.
        """
        self.attach_detach_dev("pri", "plugin", 0, self.pci0)

    def test_attach_dev_secondary(self):
        """
        Attach physical device from secondary.
        """
        self.attach_detach_dev("sec", "plugin", 0, self.pci0)

    def test_detach_dev_primary(self):
...


losser.py

Source: losser.py (GitHub)


...
#             total_loss += F.nll_loss(multi_predict_log_soft_max, multi_label)
#
#         binary_predict_detach = F.softmax(binary_predict.detach(), dim=1)
#         multi_predict_detach = F.softmax(multi_predict.detach(), dim=1)
#
#         accuracy_ratio = self.calculate_recall_precision(binary_predict_detach, binary_label)
#
#         return total_loss, accuracy_ratio
#
#     def calculate_recall_precision(self, predict_detach, label):
#
#         predict_detach_argmax = torch.argmax(predict_detach, dim=1).long()
#
#         accuracy_number = torch.sum(predict_detach_argmax == label)
#
#         accuracy_ratio = accuracy_number / len(predict_detach)
#
#         return accuracy_ratio
#
# class Loss(nn.Module):
#
#     def __init__(self, args):
#
#         super(Loss, self).__init__()
#
#         self.args = args
#         self.calculate_multi_label_loss = self.args.calculate_multi_label_loss
#
#     def forward(self, binary_predict, multi_predict, multi_label, train=True):
#
#         # binary_predict_log_soft_max = F.log_softmax(binary_predict, dim=1)
#         multi_predict_log_soft_max = F.log_softmax(multi_predict, dim=1)
#
#         binary_predict_soft_max = F.softmax(binary_predict, dim=1)
#         binary_predict_log_soft_max = torch.log(binary_predict_soft_max)
#
#         binary_predict_log_soft_max = binary_predict_log_soft_max.view(binary_predict_log_soft_max.size(0), -1)
#         multi_predict_log_soft_max = multi_predict_log_soft_max.view(multi_predict_log_soft_max.size(0), -1)
#         multi_label = multi_label.view(-1)
#
#         binary_label = torch.where(multi_label > 0, torch.full_like(multi_label, 1), multi_label).long()
#
#         weight = binary_predict_soft_max
#
#         weight[:, 0] = torch.pow(weight[:, 0], 2.0)
#         weight[:, 1] = torch.pow(1 - weight[:, 1], 2.0)
#
#         binary_predict_log_soft_max = binary_predict_log_soft_max * weight
#
#         total_loss = F.nll_loss(binary_predict_log_soft_max, binary_label)
#
#         if self.calculate_multi_label_loss == True:
#
#             total_loss += F.nll_loss(multi_predict_log_soft_max, multi_label)
#
#         binary_predict_detach = F.softmax(binary_predict.detach(), dim=1)
#         multi_predict_detach = F.softmax(multi_predict.detach(), dim=1)
#
#         binary_accuracy_ratio = self.calculate_recall_precision(binary_predict_detach, binary_label)
#
#         # print("**********************************************************************")
#         # print("label ", binary_label)
#         # print("pre ", binary_predict_detach)
#         # print("precision", binary_accuracy_ratio)
#         # print("loss ", total_loss)
#
#         return total_loss, binary_accuracy_ratio
#
#     def calculate_recall_precision(self, predict_detach, label):
#
#         predict_detach_argmax = torch.argmax(predict_detach, dim=1).long()
#
#         accuracy_number = torch.sum(predict_detach_argmax == label)
#
#         positive_accuracy = torch.sum((predict_detach_argmax == 1) * (label == 1))
#         negative_accuracy = torch.sum((predict_detach_argmax == 0) * (label == 0))
#
#         positive_accuracy_number = torch.sum(label)
#         negative_accuracy_number = len(label) - positive_accuracy_number
#
#         if positive_accuracy_number != 0:
#             positive_accuracy = positive_accuracy.float() / positive_accuracy_number
#         else:
#             positive_accuracy = torch.from_numpy(np.array([np.nan], dtype=np.float32)).cuda()[0]
#
#         if negative_accuracy_number != 0:
#             negative_accuracy = negative_accuracy.float() / negative_accuracy_number
#         else:
#             negative_accuracy = torch.from_numpy(np.array([np.nan], dtype=np.float32)).cuda()[0]
#
#         accuracy_ratio = accuracy_number.float() / len(predict_detach)
#
#         return accuracy_ratio, positive_accuracy, negative_accuracy

class Loss(nn.Module):
    def __init__(self, args):
        super(Loss, self).__init__()
        self.args = args
        self.calculate_multi_label_loss = self.args.calculate_multi_label_loss

    def forward(self, binary_predict, multi_predict, multi_label, train=True, binary_out2=None):
        # print(multi_label.cpu().numpy())
        binary_predict_soft_max = F.softmax(binary_predict, dim=1)
        binary_predict_log_soft_max = torch.log(binary_predict_soft_max)
        binary_predict_log_soft_max = binary_predict_log_soft_max.view(binary_predict_log_soft_max.size(0), -1)
        binary_label = torch.sum(multi_label, dim=1).long()
        weight = torch.pow(1 - binary_predict_soft_max, 2.0)
        binary_loss = F.nll_loss(binary_predict_log_soft_max, binary_label)
        binary_predict_detach = binary_predict_soft_max.detach()
        binary_accuracy_ratio = self.calculate_recall_precision(binary_predict_detach, binary_label)
        wrong_index = self.calculate_wrong_crop_image(binary_predict_detach, binary_label)
        '''
        multi_predict_p = F.sigmoid(multi_predict)
        multi_predict_detach = multi_predict_p.detach()
        multi_predict_p = torch.transpose(multi_predict_p, 0, 1)
        multi_predict_p_r = (torch.max(multi_predict_p, dim=0, keepdim=True)[0] - 0.00001) * torch.ones_like(multi_predict_p)
        multi_predict_p = torch.stack([multi_predict_p_r, multi_predict_p], dim=-1)
        multi_label_T = torch.transpose(multi_label, 0, 1)
        multi_loss = - 5 * F.logsigmoid(multi_predict) * multi_label.float() + (- F.logsigmoid(-multi_predict) * (1 - multi_label.float()))
        total_loss = torch.sum(multi_loss) / multi_loss.shape[0]  # + binary_loss
        '''
        multi_predict_p = F.softmax(multi_predict, dim=1)
        multi_predict_detach = multi_predict_p.detach()
        multi_predict_p = torch.transpose(multi_predict_p, 0, 1)
        multi_predict_p = torch.stack([1 - multi_predict_p, multi_predict_p], dim=-1)
        multi_label_T = torch.transpose(multi_label, 0, 1)
        multiple_loss = F.nll_loss(F.log_softmax(multi_predict, dim=1), torch.argmax(multi_label, dim=1), reduction='none')
        multiple_loss = torch.sum(multiple_loss * binary_label.float()) / (torch.sum(binary_label.float()) + 1e-4)
        multi_accuracy_ratio = []
        for multi_predict_p_x, multi_label_T_x in zip(multi_predict_p, multi_label_T):
            multi_accuracy_ratio.append(self.calculate_recall_precision(multi_predict_p_x, multi_label_T_x))
        total_loss = multiple_loss
        return total_loss, [binary_accuracy_ratio] + multi_accuracy_ratio, wrong_index, multi_predict_detach

    def calculate_wrong_crop_image(self, binary_predict_detach, binary_label):
        predict_detach_argmax = torch.argmax(binary_predict_detach, dim=1).long()
        wrong_index = (predict_detach_argmax != binary_label)
        return wrong_index
...
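The detach() calls here follow a common PyTorch pattern: the loss is computed on graph-attached tensors, while accuracy and recall/precision are computed on detached copies so the metric arithmetic neither tracks gradients nor keeps the graph alive. A minimal, self-contained sketch of that pattern (names are illustrative, not from the file above):

# Detach predictions before metric bookkeeping; only the loss backpropagates.
import torch
import torch.nn.functional as F

logits = torch.randn(8, 2, requires_grad=True)     # stand-in for model output
labels = torch.randint(0, 2, (8,))

loss = F.cross_entropy(logits, labels)             # stays in the autograd graph

probs = F.softmax(logits.detach(), dim=1)          # detached copy for metrics
accuracy = (probs.argmax(dim=1) == labels).float().mean()

loss.backward()                                    # gradients come from the loss alone
print(accuracy.item(), logits.grad.shape)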


IQAloss.py

Source: IQAloss.py (GitHub)


...
def monotonicity_regularization(y_pred, y, detach=False):
    """monotonicity regularization"""
    if y_pred.size(0) > 1:
        ranking_loss = F.relu((y_pred - y_pred.t()) * torch.sign((y.t() - y)))
        scale = 1 + torch.max(ranking_loss.detach()) if detach else 1 + torch.max(ranking_loss)
        return torch.sum(ranking_loss) / y_pred.size(0) / (y_pred.size(0) - 1) / scale
    else:
        return F.l1_loss(y_pred, y_pred.detach())  # 0 for batch with single sample.

def linearity_induced_loss(y_pred, y, alpha=[1, 1], detach=False):
    """linearity-induced loss, actually MSE loss with z-score normalization"""
    if y_pred.size(0) > 1:  # z-score normalization: (x-m(x))/sigma(x).
        sigma_hat, m_hat = torch.std_mean(y_pred.detach(), unbiased=False) if detach else torch.std_mean(y_pred, unbiased=False)
        y_pred = (y_pred - m_hat) / (sigma_hat + eps)
        sigma, m = torch.std_mean(y, unbiased=False)
        y = (y - m) / (sigma + eps)
        scale = 4
        loss0, loss1 = 0, 0
        if alpha[0] > 0:
            loss0 = F.mse_loss(y_pred, y) / scale  # ~ 1 - rho, rho is PLCC
        if alpha[1] > 0:
            rho = torch.mean(y_pred * y)
            loss1 = F.mse_loss(rho * y_pred, y) / scale  # 1 - rho ** 2 = 1 - R^2, R^2 is Coefficient of determination
        # loss0 = (1 - torch.cosine_similarity(y_pred.t() - torch.mean(y_pred), y.t() - torch.mean(y))[0]) / 2
        # yp = y_pred.detach() if detach else y_pred
        # ones = torch.ones_like(yp.detach())
        # yp1 = torch.cat((yp, ones), dim=1)
        # h = torch.mm(torch.inverse(torch.mm(yp1.t(), yp1)), torch.mm(yp1.t(), y))
        # err = torch.pow(torch.mm(torch.cat((y_pred, ones), dim=1), h) - y, 2)
        # normalization = 1 + torch.max(err.detach()) if detach else 1 + torch.max(err)
        # loss1 = torch.mean(err) / normalization
        return (alpha[0] * loss0 + alpha[1] * loss1) / (alpha[0] + alpha[1])
    else:
        return F.l1_loss(y_pred, y_pred.detach())  # 0 for batch with single sample.

def norm_loss_with_normalization(y_pred, y, alpha=[1, 1], p=2, q=2, detach=False, exponent=True):
    """norm_loss_with_normalization: norm-in-norm"""
    N = y_pred.size(0)
    if N > 1:
        m_hat = torch.mean(y_pred.detach()) if detach else torch.mean(y_pred)
        y_pred = y_pred - m_hat  # very important!!
        normalization = torch.norm(y_pred.detach(), p=q) if detach else torch.norm(y_pred, p=q)  # Actually, z-score normalization is related to q = 2.
        # print('bhat = {}'.format(normalization.item()))
        y_pred = y_pred / (eps + normalization)  # very important!
        y = y - torch.mean(y)
        y = y / (eps + torch.norm(y, p=q))
        scale = np.power(2, max(1, 1./q)) * np.power(N, max(0, 1./p - 1./q))  # p, q > 0
        loss0, loss1 = 0, 0
        if alpha[0] > 0:
            err = y_pred - y
            if p < 1:  # avoid gradient explosion when 0<=p<1; and avoid vanishing gradient problem when p < 0
                err += eps
            loss0 = torch.norm(err, p=p) / scale  # Actually, p=q=2 is related to PLCC
            loss0 = torch.pow(loss0, p) if exponent else loss0
        if alpha[1] > 0:
            rho = torch.cosine_similarity(y_pred.t(), y.t())
            err = rho * y_pred - y
            if p < 1:  # avoid gradient explosion when 0<=p<1; and avoid vanishing gradient problem when p < 0
                err += eps
            loss1 = torch.norm(err, p=p) / scale  # Actually, p=q=2 is related to LSR
            loss1 = torch.pow(loss1, p) if exponent else loss1
        # by = normalization.detach()
        # e0 = err.detach().view(-1)
        # ones = torch.ones_like(e0)
        # yhat = y_pred.detach().view(-1)
        # g0 = torch.norm(e0, p=p) / torch.pow(torch.norm(e0, p=p) + eps, p) * torch.pow(torch.abs(e0), p-1) * e0 / (torch.abs(e0) + eps)
        # ga = -ones / N * torch.dot(g0, ones)
        # gg0 = torch.dot(g0, g0)
        # gga = torch.dot(g0+ga, g0+ga)
        # print("by: {} without a and b: {} with a: {}".format(normalization, gg0, gga))
        # gb = -torch.pow(torch.abs(yhat), q-1) * yhat / (torch.abs(yhat) + eps) * torch.dot(g0, yhat)
        # gab = torch.dot(ones, torch.pow(torch.abs(yhat), q-1) * yhat / (torch.abs(yhat) + eps)) / N * torch.dot(g0, yhat)
        # ggb = torch.dot(g0+gb, g0+gb)
        # ggab = torch.dot(g0+ga+gb+gab, g0+ga+gb+gab)
        # print("by: {} without a and b: {} with a: {} with b: {} with a and b: {}".format(normalization, gg0, gga, ggb, ggab))
        return (alpha[0] * loss0 + alpha[1] * loss1) / (alpha[0] + alpha[1])
    else:
        return F.l1_loss(y_pred, y_pred.detach())  # 0 for batch with single sample.

def norm_loss_with_min_max_normalization(y_pred, y, alpha=[1, 1], detach=False):
    if y_pred.size(0) > 1:
        m_hat = torch.min(y_pred.detach()) if detach else torch.min(y_pred)
        M_hat = torch.max(y_pred.detach()) if detach else torch.max(y_pred)
        y_pred = (y_pred - m_hat) / (eps + M_hat - m_hat)  # min-max normalization
        y = (y - torch.min(y)) / (eps + torch.max(y) - torch.min(y))
        loss0, loss1 = 0, 0
        if alpha[0] > 0:
            loss0 = F.mse_loss(y_pred, y)
        if alpha[1] > 0:
            rho = torch.cosine_similarity(y_pred.t(), y.t())
            loss1 = F.mse_loss(rho * y_pred, y)
        return (alpha[0] * loss0 + alpha[1] * loss1) / (alpha[0] + alpha[1])
    else:
        return F.l1_loss(y_pred, y_pred.detach())  # 0 for batch with single sample.

def norm_loss_with_mean_normalization(y_pred, y, alpha=[1, 1], detach=False):
    if y_pred.size(0) > 1:
        mean_hat = torch.mean(y_pred.detach()) if detach else torch.mean(y_pred)
        m_hat = torch.min(y_pred.detach()) if detach else torch.min(y_pred)
        M_hat = torch.max(y_pred.detach()) if detach else torch.max(y_pred)
        y_pred = (y_pred - mean_hat) / (eps + M_hat - m_hat)  # mean normalization
        y = (y - torch.mean(y)) / (eps + torch.max(y) - torch.min(y))
        loss0, loss1 = 0, 0
        if alpha[0] > 0:
            loss0 = F.mse_loss(y_pred, y) / 4
        if alpha[1] > 0:
            rho = torch.cosine_similarity(y_pred.t(), y.t())
            loss1 = F.mse_loss(rho * y_pred, y) / 4
        return (alpha[0] * loss0 + alpha[1] * loss1) / (alpha[0] + alpha[1])
    else:
        return F.l1_loss(y_pred, y_pred.detach())  # 0 for batch with single sample.

def norm_loss_with_scaling(y_pred, y, alpha=[1, 1], p=2, detach=False):
    if y_pred.size(0) > 1:
        normalization = torch.norm(y_pred.detach(), p=p) if detach else torch.norm(y_pred, p=p)
        y_pred = y_pred / (eps + normalization)  # mean normalization
        y = y / (eps + torch.norm(y, p=p))
        loss0, loss1 = 0, 0
        if alpha[0] > 0:
            loss0 = F.mse_loss(y_pred, y) / 4
        if alpha[1] > 0:
            rho = torch.cosine_similarity(y_pred.t(), y.t())
            loss1 = F.mse_loss(rho * y_pred, y) / 4
        return (alpha[0] * loss0 + alpha[1] * loss1) / (alpha[0] + alpha[1])
    else:
...
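Throughout these IQA losses, the detach flag controls whether the normalization statistics (max, mean, norm) are treated as constants or differentiated through. Both choices give the same forward value but different gradients, which a small sketch can demonstrate (the tensor values are arbitrary):

# Detaching a normalization constant changes gradients, not the forward value.
import torch

y_pred = torch.tensor([1.0, 2.0, 4.0], requires_grad=True)

scale = 1 + torch.max(y_pred)             # scale participates in autograd
(y_pred.sum() / scale).backward()
grad_attached = y_pred.grad.clone()

y_pred.grad = None
scale = 1 + torch.max(y_pred.detach())    # scale treated as a constant
(y_pred.sum() / scale).backward()
grad_detached = y_pred.grad.clone()

print(grad_attached)   # includes a -sum/scale**2 term at the argmax position
print(grad_detached)   # simply 1/scale everywhere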


plotter.py

Source: plotter.py (GitHub)


...
    fig, axs = plt.subplots(2, 2)
    fig.set_size_inches(12, 10)
    fig.suptitle(sTitle + ', inv err {:.2e}'.format(invErr))
    # hist, xbins, ybins, im = axs[0, 0].hist2d(x.numpy()[:,0], x.numpy()[:,1], range=[[LOW, HIGH], [LOW, HIGH]], bins=nBins)
    im1, _, _, map1 = axs[0, 0].hist2d(x.detach().cpu().numpy()[:, d1], x.detach().cpu().numpy()[:, d2], range=[[LOWX, HIGHX], [LOWY, HIGHY]], bins=nBins)
    axs[0, 0].set_title('x from rho_0')
    im2, _, _, map2 = axs[0, 1].hist2d(fx.detach().cpu().numpy()[:, d1], fx.detach().cpu().numpy()[:, d2], range=[[-4, 4], [-4, 4]], bins=nBins)
    axs[0, 1].set_title('f(x)')
    im3, _, _, map3 = axs[1, 0].hist2d(finvfx.detach().cpu().numpy()[:, d1], finvfx.detach().cpu().numpy()[:, d2], range=[[LOWX, HIGHX], [LOWY, HIGHY]], bins=nBins)
    axs[1, 0].set_title('finv( f(x) )')
    im4, _, _, map4 = axs[1, 1].hist2d(genModel.detach().cpu().numpy()[:, d1], genModel.detach().cpu().numpy()[:, d2], range=[[LOWX, HIGHX], [LOWY, HIGHY]], bins=nBins)
    axs[1, 1].set_title('finv( y from rho1 )')
    fig.colorbar(map1, cax=fig.add_axes([0.47, 0.53, 0.02, 0.35]))
    fig.colorbar(map2, cax=fig.add_axes([0.89, 0.53, 0.02, 0.35]))
    fig.colorbar(map3, cax=fig.add_axes([0.47, 0.11, 0.02, 0.35]))
    fig.colorbar(map4, cax=fig.add_axes([0.89, 0.11, 0.02, 0.35]))
    # plot paths
    if doPaths:
        forwPath = integrate(x[:, 0:d], net, [0.0, 1.0], nt_val, stepper="rk4", alph=net.alph, intermediates=True)
        backPath = integrate(fx[:, 0:d], net, [1.0, 0.0], nt_val, stepper="rk4", alph=net.alph, intermediates=True)
        # plot the forward and inverse trajectories of several points; white is forward, red is inverse
        nPts = 10
        pts = np.unique(np.random.randint(nSamples, size=nPts))
        for pt in pts:
            axs[0, 0].plot(forwPath[pt, 0, :].detach().cpu().numpy(), forwPath[pt, 1, :].detach().cpu().numpy(), color='white', linewidth=4)
            axs[0, 0].plot(backPath[pt, 0, :].detach().cpu().numpy(), backPath[pt, 1, :].detach().cpu().numpy(), color='red', linewidth=2)
    for i in range(axs.shape[0]):
        for j in range(axs.shape[1]):
            # axs[i, j].get_yaxis().set_visible(False)
            # axs[i, j].get_xaxis().set_visible(False)
            axs[i, j].set_aspect('equal')
    # sPath = os.path.join(args.save, 'figs', sStartTime + '_{:04d}.png'.format(itr))
    if not os.path.exists(os.path.dirname(sPath)):
        os.makedirs(os.path.dirname(sPath))
    plt.savefig(sPath, dpi=300)
    plt.close()

def plotAutoEnc(x, xRecreate, sPath):
    # assume square image
    s = int(math.sqrt(x.shape[1]))
    nex = 8
    fig, axs = plt.subplots(4, nex//2)
    fig.set_size_inches(9, 9)
    fig.suptitle("first 2 rows originals. Rows 3 and 4 are generations.")
    for i in range(nex//2):
        axs[0, i].imshow(x[i, :].reshape(s, s).detach().cpu().numpy())
        axs[1, i].imshow(x[nex//2 + i, :].reshape(s, s).detach().cpu().numpy())
        axs[2, i].imshow(xRecreate[i, :].reshape(s, s).detach().cpu().numpy())
        axs[3, i].imshow(xRecreate[nex//2 + i, :].reshape(s, s).detach().cpu().numpy())
    for i in range(axs.shape[0]):
        for j in range(axs.shape[1]):
            axs[i, j].get_yaxis().set_visible(False)
            axs[i, j].get_xaxis().set_visible(False)
            axs[i, j].set_aspect('equal')
    plt.subplots_adjust(wspace=0.0, hspace=0.0)
    if not os.path.exists(os.path.dirname(sPath)):
        os.makedirs(os.path.dirname(sPath))
    plt.savefig(sPath, dpi=300)
    plt.close()

def plotAutoEnc3D(x, xRecreate, sPath):
    nex = 8
    fig, axs = plt.subplots(4, nex//2)
    fig.set_size_inches(9, 9)
    fig.suptitle("first 2 rows originals. Rows 3 and 4 are generations.")
    for i in range(nex//2):
        axs[0, i].imshow(x[i, :].permute(1, 2, 0).detach().cpu().numpy())
        axs[1, i].imshow(x[nex//2 + i, :].permute(1, 2, 0).detach().cpu().numpy())
        axs[2, i].imshow(xRecreate[i, :].permute(1, 2, 0).detach().cpu().numpy())
        axs[3, i].imshow(xRecreate[nex//2 + i, :].permute(1, 2, 0).detach().cpu().numpy())
    for i in range(axs.shape[0]):
        for j in range(axs.shape[1]):
            axs[i, j].get_yaxis().set_visible(False)
            axs[i, j].get_xaxis().set_visible(False)
            axs[i, j].set_aspect('equal')
    plt.subplots_adjust(wspace=0.0, hspace=0.0)
    if not os.path.exists(os.path.dirname(sPath)):
        os.makedirs(os.path.dirname(sPath))
    plt.savefig(sPath, dpi=300)
    plt.close()

def plotImageGen(x, xRecreate, sPath):
    # assume square image
    s = int(math.sqrt(x.shape[1]))
    nex = 80
    nCols = nex//5
    fig, axs = plt.subplots(7, nCols)
    fig.set_size_inches(16, 7)
    fig.suptitle("first 2 rows originals. Rows 3 and 4 are generations.")
    for i in range(nCols):
        axs[0, i].imshow(x[i, :].reshape(s, s).detach().cpu().numpy())
        # axs[1, i].imshow(x[nex//3 + i, :].reshape(s, s).detach().cpu().numpy())
        # axs[2, i].imshow(x[2*nex//3 + i, :].reshape(s, s).detach().cpu().numpy())
        axs[2, i].imshow(xRecreate[i, :].reshape(s, s).detach().cpu().numpy())
        axs[3, i].imshow(xRecreate[nCols + i, :].reshape(s, s).detach().cpu().numpy())

        axs[4, i].imshow(xRecreate[2*nCols + i, :].reshape(s, s).detach().cpu().numpy())
        axs[5, i].imshow(xRecreate[3*nCols + i, :].reshape(s, s).detach().cpu().numpy())
        axs[6, i].imshow(xRecreate[4*nCols + i, :].reshape(s, s).detach().cpu().numpy())
    for i in range(axs.shape[0]):
        for j in range(axs.shape[1]):
            axs[i, j].get_yaxis().set_visible(False)
            axs[i, j].get_xaxis().set_visible(False)
            axs[i, j].set_aspect('equal')
    plt.subplots_adjust(wspace=0.0, hspace=0.0)
    if not os.path.exists(os.path.dirname(sPath)):
        os.makedirs(os.path.dirname(sPath))
    plt.savefig(sPath, dpi=300)
    plt.close()

def plot4mnist(x, sPath, sTitle=""):
    """
    x - tensor (>4, 28, 28)
    """
    fig, axs = plt.subplots(2, 2)
    fig.set_size_inches(12, 10)
    fig.suptitle(sTitle)
    im1 = axs[0, 0].imshow(x[0, :, :].detach().cpu().numpy())
    im2 = axs[0, 1].imshow(x[1, :, :].detach().cpu().numpy())
    im3 = axs[1, 0].imshow(x[2, :, :].detach().cpu().numpy())
    im4 = axs[1, 1].imshow(x[3, :, :].detach().cpu().numpy())
    fig.colorbar(im1, cax=fig.add_axes([0.47, 0.53, 0.02, 0.35]))
    fig.colorbar(im2, cax=fig.add_axes([0.89, 0.53, 0.02, 0.35]))
    fig.colorbar(im3, cax=fig.add_axes([0.47, 0.11, 0.02, 0.35]))
    fig.colorbar(im4, cax=fig.add_axes([0.89, 0.11, 0.02, 0.35]))
    for i in range(axs.shape[0]):
        for j in range(axs.shape[1]):
            axs[i, j].get_yaxis().set_visible(False)
            axs[i, j].get_xaxis().set_visible(False)
            axs[i, j].set_aspect('equal')
    # sPath = os.path.join(args.save, 'figs', sStartTime + '_{:04d}.png'.format(itr))
    if not os.path.exists(os.path.dirname(sPath)):
        os.makedirs(os.path.dirname(sPath))
    plt.savefig(sPath, dpi=300)
    plt.close()
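Every plotting call above goes through the same detach().cpu().numpy() chain: detach drops the tensor out of the autograd graph (NumPy conversion fails on tensors that require grad), cpu() moves it to host memory, and numpy() hands matplotlib a plain ndarray. The idiom in isolation:

# The standard tensor-to-matplotlib bridge.
import torch
import matplotlib.pyplot as plt

x = torch.linspace(0, 1, 50, requires_grad=True)
y = (x ** 2).detach().cpu().numpy()   # no grad, host memory, plain ndarray

plt.plot(y)
plt.savefig("curve.png", dpi=300)
plt.close()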


test_mlp.py

Source: test_mlp.py (GitHub)


...
            mlp_layers.append(linear)
            mlp_layers.append(nn.ReLU(inplace=True))
        ref_mlp = nn.Sequential(*mlp_layers).cuda()
        test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
        ref_input = test_input.clone().detach().requires_grad_()
        mlp_out = mlp(test_input)
        ref_out = ref_mlp(ref_input)
        np.testing.assert_allclose(
            mlp_out.detach().cpu().numpy(),
            ref_out.detach().cpu().numpy(),
            atol=1e-7, rtol=1e-5)
        # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
        mlp_out.mean().mul(10.).backward()
        ref_out.mean().mul(10.).backward()
        np.testing.assert_allclose(
            test_input.grad.detach().cpu().numpy(),
            ref_input.grad.detach().cpu().numpy(),
            atol=0, rtol=1e-5)
        np.testing.assert_allclose(
            mlp.biases[0].grad.detach().cpu().numpy(),
            ref_mlp[0].bias.grad.detach().cpu().numpy(),
            atol=1e-7, rtol=1e-5)

    def test_no_bias(self):
        for use_activation in ['none', 'relu', 'sigmoid']:
            mlp = MLP(mlp_sizes, bias=False, activation=use_activation).cuda()
            mlp_layers = []
            for i in range(mlp.num_layers):
                linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1], bias=False)
                mlp.weights[i].data.copy_(linear.weight)
                mlp_layers.append(linear)
                if use_activation == 'relu':
                    mlp_layers.append(nn.ReLU(inplace=True))
                if use_activation == 'sigmoid':
                    mlp_layers.append(nn.Sigmoid())
            ref_mlp = nn.Sequential(*mlp_layers).cuda()
            test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
            ref_input = test_input.clone().detach().requires_grad_()
            mlp_out = mlp(test_input)
            ref_out = ref_mlp(ref_input)
            np.testing.assert_allclose(
                mlp_out.detach().cpu().numpy(),
                ref_out.detach().cpu().numpy(),
                atol=1e-7, rtol=1e-5)
            # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
            mlp_out.mean().mul(10.).backward()
            ref_out.mean().mul(10.).backward()
            np.testing.assert_allclose(
                test_input.grad.detach().cpu().numpy(),
                ref_input.grad.detach().cpu().numpy(),
                atol=0, rtol=100)
            np.testing.assert_allclose(
                mlp.weights[0].grad.detach().cpu().numpy(),
                ref_mlp[0].weight.grad.detach().cpu().numpy(),
                atol=1e-7, rtol=100)

    def test_with_bias(self):
        for use_activation in ['none', 'relu', 'sigmoid']:
            mlp = MLP(mlp_sizes, bias=True, activation=use_activation).cuda()
            mlp_layers = []
            for i in range(mlp.num_layers):
                linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1], bias=True)
                mlp.weights[i].data.copy_(linear.weight)
                mlp.biases[i].data.copy_(linear.bias)
                mlp_layers.append(linear)
                if use_activation == 'relu':
                    mlp_layers.append(nn.ReLU(inplace=True))
                if use_activation == 'sigmoid':
                    mlp_layers.append(nn.Sigmoid())
            ref_mlp = nn.Sequential(*mlp_layers).cuda()
            test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.).requires_grad_()
            ref_input = test_input.clone().detach().requires_grad_()
            mlp_out = mlp(test_input)
            ref_out = ref_mlp(ref_input)
            np.testing.assert_allclose(
                mlp_out.detach().cpu().numpy(),
                ref_out.detach().cpu().numpy(),
                atol=1e-7, rtol=1e-5)
            # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
            mlp_out.mean().mul(10.).backward()
            ref_out.mean().mul(10.).backward()
            np.testing.assert_allclose(
                test_input.grad.detach().cpu().numpy(),
                ref_input.grad.detach().cpu().numpy(),
                atol=0, rtol=1)
            np.testing.assert_allclose(
                mlp.weights[0].grad.detach().cpu().numpy(),
                ref_mlp[0].weight.grad.detach().cpu().numpy(),
                atol=1e-7, rtol=1)
            np.testing.assert_allclose(
                mlp.biases[0].grad.detach().cpu().numpy(),
                ref_mlp[0].bias.grad.detach().cpu().numpy(),
                atol=1e-7, rtol=1e-5)

    def test_no_grad(self):
        mlp = MLP(mlp_sizes).cuda()
        mlp_layers = []
        for i in range(mlp.num_layers):
            linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
            mlp.weights[i].data.copy_(linear.weight)
            mlp.biases[i].data.copy_(linear.bias)
            mlp_layers.append(linear)
            mlp_layers.append(nn.ReLU(inplace=True))
        ref_mlp = nn.Sequential(*mlp_layers).cuda()
        test_input = torch.empty(batch_size, mlp_sizes[0], device="cuda").uniform_(-1., 1.)
        ref_input = test_input.clone().detach()
        mlp_out = mlp(test_input)
        ref_out = ref_mlp(ref_input)
        np.testing.assert_allclose(
            mlp_out.detach().cpu().numpy(),
            ref_out.detach().cpu().numpy(),
            atol=1e-7, rtol=1e-5)
        # Use mean value as scalar loss. Multiply 10 to make it big enough not zero out
        mlp_out.mean().mul(10.).backward()
        ref_out.mean().mul(10.).backward()
        np.testing.assert_allclose(
            mlp.weights[0].grad.detach().cpu().numpy(),
            ref_mlp[0].weight.grad.detach().cpu().numpy(),
            atol=1e-7, rtol=1e-5)

    def test_performance_half(self):
        mlp = MLP(mlp_sizes).cuda().half()
        mlp_layers = []
        for i in range(mlp.num_layers):
            linear = nn.Linear(mlp_sizes[i], mlp_sizes[i + 1])
            mlp.weights[i].data.copy_(linear.weight)
            mlp.biases[i].data.copy_(linear.bias)
            mlp_layers.append(linear)
            mlp_layers.append(nn.ReLU(inplace=True))
        ref_mlp = nn.Sequential(*mlp_layers).cuda().half()
        test_input = torch.empty(
            batch_size, mlp_sizes[0], device="cuda", dtype=torch.half).fill_(10.).requires_grad_()
        ref_input = torch.empty(
...
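The reference input in these tests is built with clone().detach().requires_grad_(), the canonical recipe for an independent leaf copy: clone duplicates the values, detach severs the copy from the original's graph, and requires_grad_() turns it into a fresh leaf so the two networks accumulate gradients separately. In isolation:

# clone().detach().requires_grad_() yields an independent leaf tensor.
import torch

a = torch.randn(4, requires_grad=True)
b = a.clone().detach().requires_grad_()

(b * 2).sum().backward()
print(a.grad)   # None: gradients on the copy never reach `a`
print(b.grad)   # tensor([2., 2., 2., 2.])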


test_payment_method.py

Source: test_payment_method.py (GitHub)


...
                "djstripe.Customer.coupon",
                "djstripe.Customer.default_payment_method",
            },
        )

    def test_detach(self):
        original_detach = PaymentMethodDict.detach

        def mocked_detach(*args, **kwargs):
            return original_detach(*args, **kwargs)

        with patch(
            "stripe.PaymentMethod.retrieve",
            return_value=deepcopy(FAKE_PAYMENT_METHOD_I),
            autospec=True,
        ):
            PaymentMethod.sync_from_stripe_data(deepcopy(FAKE_PAYMENT_METHOD_I))
        self.assertEqual(1, self.customer.payment_methods.count())
        payment_method = self.customer.payment_methods.first()
        with patch(
            "tests.PaymentMethodDict.detach", side_effect=mocked_detach, autospec=True
        ) as mock_detach, patch(
            "stripe.PaymentMethod.retrieve",
            return_value=deepcopy(FAKE_PAYMENT_METHOD_I),
            autospec=True,
        ):
            self.assertTrue(payment_method.detach())
        self.assertEqual(0, self.customer.payment_methods.count())
        self.assertIsNone(self.customer.default_payment_method)
        self.assertIsNone(payment_method.customer)
        if sys.version_info >= (3, 6):
            # this mock isn't working on py34, py35, but it's not strictly necessary
            # for the test
            mock_detach.assert_called()
        self.assert_fks(
            payment_method, expected_blank_fks={"djstripe.PaymentMethod.customer"}
        )
        with patch(
            "tests.PaymentMethodDict.detach",
            side_effect=InvalidRequestError(
                message="A source must be attached to a customer to be used "
                "as a `payment_method`",
                param="payment_method",
            ),
            autospec=True,
        ) as mock_detach, patch(
            "stripe.PaymentMethod.retrieve",
            return_value=deepcopy(FAKE_PAYMENT_METHOD_I),
            autospec=True,
        ) as payment_method_retrieve_mock:
            payment_method_retrieve_mock.return_value["customer"] = None
            self.assertFalse(
                payment_method.detach(), "Second call to detach should return false"
            )

    def test_detach_card(self):
        original_detach = PaymentMethodDict.detach
        # "card_" payment methods are deleted after detach
        deleted_card_exception = InvalidRequestError(
            message="No such payment_method: card_xxxx",
            param="payment_method",
            code="resource_missing",
        )

        def mocked_detach(*args, **kwargs):
            return original_detach(*args, **kwargs)

        with patch(
            "stripe.PaymentMethod.retrieve",
            return_value=deepcopy(FAKE_CARD_AS_PAYMENT_METHOD),
            autospec=True,
        ):
            PaymentMethod.sync_from_stripe_data(deepcopy(FAKE_CARD_AS_PAYMENT_METHOD))
        self.assertEqual(1, self.customer.payment_methods.count())
        payment_method = self.customer.payment_methods.first()
        self.assertTrue(
            payment_method.id.startswith("card_"), "We expect this to be a 'card_'"
        )
        with patch(
            "tests.PaymentMethodDict.detach", side_effect=mocked_detach, autospec=True
        ) as mock_detach, patch(
            "stripe.PaymentMethod.retrieve",
            return_value=deepcopy(FAKE_CARD_AS_PAYMENT_METHOD),
            autospec=True,
        ):
            self.assertTrue(payment_method.detach())
        self.assertEqual(0, self.customer.payment_methods.count())
        self.assertIsNone(self.customer.default_payment_method)
        self.assertEqual(
            PaymentMethod.objects.filter(id=payment_method.id).count(),
            0,
            "We expect PaymentMethod id = card_* to be deleted",
        )
        if sys.version_info >= (3, 6):
            # this mock isn't working on py34, py35, but it's not strictly necessary
            # for the test
            mock_detach.assert_called()
        with patch(
            "tests.PaymentMethodDict.detach",
            side_effect=InvalidRequestError(
                message="A source must be attached to a customer to be used "
                "as a `payment_method`",
                param="payment_method",
            ),
            autospec=True,
        ) as mock_detach, patch(
            "stripe.PaymentMethod.retrieve",
            side_effect=deleted_card_exception,
            autospec=True,
        ) as payment_method_retrieve_mock:
            payment_method_retrieve_mock.return_value["customer"] = None
            self.assertFalse(
                payment_method.detach(), "Second call to detach should return false"
            )

    def test_sync_null_customer(self):
        payment_method = PaymentMethod.sync_from_stripe_data(
            deepcopy(FAKE_PAYMENT_METHOD_I)
        )
        self.assertIsNotNone(payment_method.customer)
        # simulate remote detach
        fake_payment_method_no_customer = deepcopy(FAKE_PAYMENT_METHOD_I)
        fake_payment_method_no_customer["customer"] = None
        payment_method = PaymentMethod.sync_from_stripe_data(
            fake_payment_method_no_customer
        )
        self.assertIsNone(payment_method.customer)
        self.assert_fks(
...
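Here detach() is dj-stripe's wrapper around Stripe's detach endpoint, which unlinks a PaymentMethod from its customer (and the test confirms a second detach returns False). For orientation, a minimal sketch with the plain stripe Python client; the API key and object IDs are placeholders:

# Detaching a payment method with the stripe Python client (sketch).
import stripe

stripe.api_key = "sk_test_..."                                  # placeholder

pm = stripe.PaymentMethod.attach("pm_...", customer="cus_...")  # link to a customer
pm = stripe.PaymentMethod.detach("pm_...")                      # unlink it again
print(pm.customer)  # None once the payment method is detached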


ben_division.py

Source: ben_division.py (GitHub)


...
    """
    x, = ctx.saved_tensors
    grad_x = grad_output.clone()
    grad_x *= -100
    np.savetxt('denominator.csv', np.reshape(x.detach().numpy(), [-1, 1]))
    raise ValueError('stop here')
    # print(x)
    return grad_x

class MyInvert2(torch.autograd.Function):
  """
  Second attempt, with grad *= -x
  """
  @staticmethod
  def forward(ctx, x):
    ctx.save_for_backward(x)
    return torch.div(1, x)

  @staticmethod
  def backward(ctx, grad_output):
    x, = ctx.saved_tensors
    grad_x = grad_output.clone()
    # print(np.shape(grad_x.detach().numpy()))
    # np.savetxt('denominator.csv', np.reshape(x.detach().numpy(), [-1, 1]))
    # np.savetxt('grad.csv', np.reshape(grad_x.detach().numpy(), [-1, 1]))
    # raise ValueError('stop here')
    grad_x *= -x
    # grad_x *= -1*x
    return grad_x

class MyInvert3(torch.autograd.Function):
  """
  Third attempt, with grad *= -x
  """
  @staticmethod
  def forward(ctx, x):
    ctx.save_for_backward(x)
    return torch.div(1, x)

  @staticmethod
  def backward(ctx, grad_output):
    x, = ctx.saved_tensors
    grad_x = grad_output.clone()
    grad_x[grad_x < 0] *= -x[grad_x < 0]
    grad_x[grad_x > 0] *= -1
    return grad_x

class MyInvert_original(torch.autograd.Function):
  """
  Third attempt, with grad *= -x
  """
  @staticmethod
  def forward(ctx, x):
    ctx.save_for_backward(x)
    return torch.div(1, x)

  @staticmethod
  def backward(ctx, grad_output):
    x, = ctx.saved_tensors
    grad_x = grad_output.clone()
    # print("Mean of gradient received by inversion", np.mean(grad_x.detach().numpy(), axis=(0,2)))
    grad_x *= -1/torch.mul(x, x)
    return grad_x

class Mymul_original(torch.autograd.Function):
  """
  Attempt to make multi-input custom auto-grad function
  """
  @staticmethod
  def forward(ctx, x, y):
    ctx.save_for_backward(x, y)
    return torch.mul(x, y)

  @staticmethod
  def backward(ctx, grad):
    x, y, = ctx.saved_tensors
    grad = grad.clone()
    # print("Mean of gradient received by multiple", np.mean(grad.detach().numpy(), axis=(0,2)))
    grad_x = grad * y
    grad_y = grad * x
    return grad_x, grad_y

class Mydiv(torch.autograd.Function):
  """
  Attempt to make multi-input custom auto-grad function
  """
  @staticmethod
  def forward(ctx, x, y):
    ctx.save_for_backward(x, y)
    return torch.div(x, y)

  @staticmethod
  def backward(ctx, grad):
    x, y, = ctx.saved_tensors
    grad = grad.clone()
    grad_x = grad * y
    grad_y = grad * -x
    # grad_x = grad * torch.mul(y, y)
    # grad_y = grad * torch.mul(y, -x)
    # np.savetxt('grad.csv', np.reshape(grad.detach().numpy(), [-1, 1]))
    # np.savetxt('numerator.csv', np.reshape(x.detach().numpy(), [-1, 1]))
    # np.savetxt('denominator.csv', np.reshape(y.detach().numpy(), [-1, 1]))
    # print("shape of grad get by division:", np.shape(grad.detach().numpy()))
    # print("Mean of gradient received by division", np.mean(grad.detach().numpy(), axis=(0,2)))
    # print("Mean of gradient received by division", np.mean(grad.detach().numpy(), axis=(1,2)))
    # print("Mean of gradient received by division", np.mean(grad.detach().numpy(), axis=(0,1)))
    """
    print("gradient received by division", grad.detach().numpy()[0,0,:])
    print("gradient passed to numerator", grad_x.detach().numpy()[0,0,:])
    print("gradient passed to denominator", grad_y.detach().numpy()[0,0,:])
    print("Numerator", x.detach().numpy()[0,0,:])
    print("denominator", y.detach().numpy()[0,0,:])
    raise ValueError("This is intentional stop for track backward gradient")
    """
    return grad_x, grad_y

class Mydiv2(torch.autograd.Function):
  """
  Attempt to make multi-input custom auto-grad function
  """
  @staticmethod
  def forward(ctx, x, y):
    ctx.save_for_backward(x, y)
    return torch.div(x, y)

  @staticmethod
  def backward(ctx, grad):
    x, y, = ctx.saved_tensors
    grad = grad.clone()
    # print("Mean of gradient received by multiple", np.mean(grad.detach().numpy(), axis=(0,2)))
    grad_x = grad
    grad_y = grad * -x / y
    return grad_x, grad_y

class Grad_mon(torch.autograd.Function):
  """
  Monitor gradient custom function
  """
  @staticmethod
  def forward(ctx, x):
    ctx.save_for_backward(x)
    return x

  @staticmethod
  def backward(ctx, grad):
    x, = ctx.saved_tensors
    grad = grad.clone()
    # print("gradient in grad mon stage", grad.detach().numpy()[0,:])
    # print("Mean of gradient received by multiple", np.mean(grad.detach().numpy(), axis=(0,2)))
...
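These classes are experiments in overriding backward() via torch.autograd.Function; for reference, the analytically correct gradients of x/y are grad/y for the numerator and -grad*x/y**2 for the denominator (note that Mydiv and Mydiv2 above deliberately deviate from this). A correct version, checked numerically with gradcheck, offered as a sketch of my own rather than code from the file:

# A division Function with the exact backward, verified by gradcheck.
import torch

class Div(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, y):
        ctx.save_for_backward(x, y)
        return x / y

    @staticmethod
    def backward(ctx, grad):
        x, y = ctx.saved_tensors
        return grad / y, grad * (-x) / (y * y)

x = torch.randn(5, dtype=torch.double, requires_grad=True)
y = torch.rand(5, dtype=torch.double, requires_grad=True) + 1.0  # keep y away from 0
print(torch.autograd.gradcheck(Div.apply, (x, y)))  # True when the backward is exact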


test_celeryd_detach.py

Source: test_celeryd_detach.py (GitHub)


...
        def test_execs(self, setup_logs, logger, execv, detached):
            context = detached.return_value = Mock()
            context.__enter__ = Mock()
            context.__exit__ = Mock()
            detach('/bin/boo', ['a', 'b', 'c'], logfile='/var/log',
                   pidfile='/var/pid', hostname='foo@example.com')
            detached.assert_called_with(
                '/var/log', '/var/pid', None, None, None, None, False,
                after_forkers=False,
            )
            execv.assert_called_with('/bin/boo', ['/bin/boo', 'a', 'b', 'c'])
            r = detach('/bin/boo', ['a', 'b', 'c'],
                       logfile='/var/log', pidfile='/var/pid',
                       executable='/bin/foo', app=self.app)
            execv.assert_called_with('/bin/foo', ['/bin/foo', 'a', 'b', 'c'])
            execv.side_effect = Exception('foo')
            r = detach(
                '/bin/boo', ['a', 'b', 'c'],
                logfile='/var/log', pidfile='/var/pid',
                hostname='foo@example.com', app=self.app)
            context.__enter__.assert_called_with()
            logger.critical.assert_called()
            setup_logs.assert_called_with(
                'ERROR', '/var/log', hostname='foo@example.com')
            assert r == 1
            self.patching('celery.current_app')
            from celery import current_app
            r = detach(
                '/bin/boo', ['a', 'b', 'c'],
                logfile='/var/log', pidfile='/var/pid',
                hostname='foo@example.com', app=None)
            current_app.log.setup_logging_subsystem.assert_called_with(
                'ERROR', '/var/log', hostname='foo@example.com',
            )

class test_PartialOptionParser:
    def test_parser(self):
        x = detached_celeryd(self.app)
        p = x.create_parser('celeryd_detach')
        options, leftovers = p.parse_known_args([
            '--logfile=foo', '--fake', '--enable',
            'a', 'b', '-c1', '-d', '2',
        ])
...
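The function under test is celery's internal detach() helper: it daemonizes the current process with the detached context manager and then execv's the worker binary, which is what the user-facing --detach flag relies on. A rough sketch of that underlying pattern, assuming celery.platforms.detached and omitting the uid/gid/umask and logging handling the real helper performs:

# Rough sketch of what celeryd_detach does: daemonize, then exec the worker.
# Simplified; the real helper also handles uid/gid, umask and logging setup.
import os
from celery.platforms import detached

def detach_sketch(path, argv, logfile=None, pidfile=None):
    with detached(logfile, pidfile):
        os.execv(path, [path] + argv)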


Using AI Code Generation


var gherkin = require('cucumber-gherkin');
var fs = require('fs');
var parser = new gherkin.Parser();
var lexer = new gherkin.Lexer('en');
var parser = new gherkin.Parser();
var feature = parser.parse(lexer.lex('Feature: test
Then test'));
var feature = parser.parse(lexer.lex('Feature: test
Then test'));
var feature = parser.parse(lexer.lex('Feature: test
Then test'));
console.log(feature);
var feature = parser.parse(lexer.lex('Feature: test
Then test'));
var feature = parser.parse(lexer.lex('Feature: test
Then test'));
var feature = parser.parse(lexer.lex('Feature: test
Then test'));
var feature = parser.parse(lexer.lex('Feature: test
Then test'));
var feature = parser.parse(lexer.lex('Feature: test
Then test'));
var feature = parser.parse(lexer.lex('Feature: test
Then test'));
var feature = parser.parse(lexer.lex('Feature: test
Then test'));
var feature = parser.parse(lexer.lex('Feature: test
Then test'));
var feature = parser.parse(lexer.lex('Feature: test
Then test'));
var feature = parser.parse(lexer.lex('Feature: test
Then test'));
var feature = parser.parse(lexer.lex('Feature: test
Then test'));
var feature = parser.parse(lexer.lex('Feature: test
Then test'));
console.log(feature);
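The snippets in this section are AI-generated and blend APIs from different gherkin releases (Lexer belongs to the old Gherkin 2 line, while newer parsers return a GherkinDocument AST), so treat them as illustrative rather than runnable. For a known-good reference point, here is a minimal parse using the Gherkin parser's Python port, assuming the gherkin-official package and its dict-based AST:

# Parsing a feature into an AST with the official parser's Python port.
# Assumes: pip install gherkin-official
from gherkin.parser import Parser
from gherkin.token_scanner import TokenScanner

source = "Feature: test\n\n  Scenario: test\n    Given a step\n"
gherkin_document = Parser().parse(TokenScanner(source))
print(gherkin_document["feature"]["name"])  # -> "test"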


Using AI Code Generation


var gherkin = require('cucumber-gherkin');
var fs = require('fs');
var gherkinSource = fs.readFileSync('test.feature', 'utf-8');
var parser = new gherkin.Parser();
var feature = parser.parse(gherkinSource);
var featureJson = JSON.stringify(feature);
console.log(featureJson);
var gherkin = require('cucumber-gherkin');
var fs = require('fs');
var gherkinSource = fs.readFileSync('test.feature', 'utf-8');
var parser = new gherkin.Parser();
var feature = parser.parse(gherkinSource);
var featureJson = JSON.stringify(feature);
var html = require('cucumber-html');
var report = html.createReport({
});
fs.writeFileSync('report.html', report);


Using AI Code Generation


var gherkin = require('gherkin');
var parser = new gherkin.Parser();
var lexer = new gherkin.Lexer('en');
              Then I should see the login page\n';
var eventBroadcaster = new gherkin.EventBroadcaster();
var eventListener = new gherkin.Query();
eventBroadcaster.addEventListener(eventListener.getHandler());
var tokens = lexer.lex(source);
var parser = new gherkin.Parser();
var gherkinDocument = parser.parse(tokens);
var pickle = gherkinDocument.feature.children[0];
var pickleEvent = {
};
eventBroadcaster.broadcastEvent(pickleEvent);
var testStep = pickle.steps[0];
var testStepStartedEvent = {
};
eventBroadcaster.broadcastEvent(testStepStartedEvent);
var testStepFinishedEvent = {
    result: {
    }
};
eventBroadcaster.broadcastEvent(testStepFinishedEvent);
var testRunFinishedEvent = {
};
eventBroadcaster.broadcastEvent(testRunFinishedEvent);
eventBroadcaster.detach(eventListener.getHandler());
var testStep = pickle.steps[1];
var testStepStartedEvent = {
};
eventBroadcaster.broadcastEvent(testStepStartedEvent);
var testStepFinishedEvent = {
    result: {
    }
};
eventBroadcaster.broadcastEvent(testStepFinishedEvent);
var testRunFinishedEvent = {
};
eventBroadcaster.broadcastEvent(testRunFinishedEvent);
console.log(eventListener.getPickle('test scenario').steps[0].text);


Using AI Code Generation


const { Parser } = require('gherkin');
const parser = new Parser();
const gherkinDocument = parser.parse('Feature: Hello');
console.log(gherkinDocument.feature.name);
parser.detach();
const gherkinDocument2 = parser.parse('Feature: Hello2');
console.log(gherkinDocument2.feature.name);



Using AI Code Generation


const { Parser, TokenScanner, TokenMatcher } = require('gherkin');
const parser = new Parser(new TokenScanner(), new TokenMatcher());
`;
const feature = parser.parse(gherkin);
const scenario = feature.getScenario(0);
feature.detach(scenario);
const { Parser, TokenScanner, TokenMatcher } = require('gherkin');
const parser = new Parser(new TokenScanner(), new TokenMatcher());
`;
const feature = parser.parse(gherkin);
const scenario = feature.getScenario(0);
const step = scenario.getStep(0);
scenario.detach(step);
const { Parser, TokenScanner, TokenMatcher } = require('gherkin');
const parser = new Parser(new TokenScanner(), new TokenMatcher());
`;
const feature = parser.parse(gherkin);
const scenario = feature.getScenario(0);
const step = scenario.getStep(0);
scenario.detach(step);


Using AI Code Generation


var gherkin = require('gherkin');
var parser = new gherkin.Parser();
var source = "Feature: test\nScenario: test\nGiven I am on the page";
var events = parser.parse(source);
events.on('data', function(event) {
  console.log(event);
});
events.on('end', function() {
  console.log("Done");
});
var gherkin = require('gherkin');
var parser = new gherkin.Parser();
var source = "Feature: test\nScenario: test\nGiven I am on the page";
var events = parser.parse(source);
var ast = events.detach();
console.log(ast);


Cucumber Tutorial:

LambdaTest offers a detailed Cucumber testing tutorial explaining its features, importance, and best practices to help you get started with your automation testing scripts.

Cucumber Tutorial Chapters:

Here are the detailed Cucumber testing chapters to help you get started:

  • Importance of Cucumber - Learn why Cucumber is important in Selenium automation testing for identifying bugs and errors early in the development phase.
  • Setting Up Cucumber in Eclipse and IntelliJ - Learn how to set up Cucumber in Eclipse and IntelliJ.
  • Running First Cucumber.js Test Script - After setting up Cucumber in Eclipse or IntelliJ, this chapter will help you get started with Selenium Cucumber testing in no time.
  • Annotations in Cucumber - To handle multiple feature files and the multiple scenarios in each one, you need a way to control which scenarios execute. This chapter covers a handful of Cucumber annotations, from tags to Cucumber hooks, that ease the maintenance of the framework.
  • Automation Testing With Cucumber And Nightwatch JS - Learn how to build a robust BDD framework for Selenium automation testing by integrating Cucumber into the Nightwatch.js framework.
  • Automation Testing With Selenium, Cucumber & TestNG - Learn how to perform Selenium automation testing by integrating Cucumber with the TestNG framework.
  • Integrate Cucumber With Jenkins - With Cucumber and Jenkins integration, you can schedule test case executions remotely and take advantage of Jenkins' benefits. Learn how in this detailed chapter.
  • Cucumber Best Practices For Selenium Automation - Take a deep dive into advanced use cases, such as creating and separating feature files, for Cucumber testing.

Run Cucumber-gherkin automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

