How to use the logs_dir method in Slash

Best Python code snippet using slash

log.py

Source:log.py Github

copy

Full Screen

import os
import time
import logging
import logging.handlers


# NOTE(review): reconstructed from a garbled scrape of log.py.  The class
# header and most of __init__ were truncated in the source; the visible tail
# of __init__ was:
#     self.logger.propagate = False
#     # set the level and run every update that depends on it
#     self.update_level(level)
class Log:
    """Convenience wrapper around ``logging``.

    Maps level names to logger methods (``log.info(msg)``) and even to the
    instance itself (``log(msg)``), and keeps a rotating file handler plus a
    stream handler in sync with the current ``level``.
    """

    def update_level(self, level):
        """Set the log level and refresh everything derived from it.

        :param level: log level name; must appear in ``self.__level_list``.
        :raises TypeError: if *level* is not a ``str``.
        :raises NameError: if *level* is not a known level name.
        """
        if not isinstance(level, str):  # was: type(level) == str
            raise TypeError(
                "level expected type is str but get type {}".format(
                    type(level).__name__))
        elif level not in self.__level_list:
            raise NameError(
                "check level at least one corrected logLevel in: {}".format(
                    self.__level_list))
        else:
            # Write through __dict__ so __setattr__ does not re-enter us.
            self.__dict__['level'] = level
            self.logger.setLevel(self.__level_mapping.get(self.level))
            self.create_logs_dir()
            # Drop previous handlers so paths/levels are not duplicated.
            self.logger.handlers.clear()
            self.set_file_log()
            self.set_stream_log()

    def create_logs_dir(self, logs_dir=None):
        """Compute ``self.logs_dir`` as ``<base>/logs/<level>``.

        :param logs_dir: optional base directory; defaults to this module's
            directory.
        """
        if logs_dir:
            self.logs_dir = os.path.join(logs_dir, 'logs', self.level)
        else:
            # BUGFIX: the original used os.path.dirname("__file__") — a string
            # literal — which is always '' and silently resolved the log
            # directory relative to the current working directory.
            self.logs_dir = os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "logs", self.level)

    def set_file_log(self, fmt=None):
        """Attach a size-rotating file handler (100 KB per file, 5 backups).

        :param fmt: optional logging format string; a verbose default is used
            when omitted.
        """
        self.fmt = fmt or '%(asctime)-12s %(name)s %(filename)s[%(lineno)d] %(levelname)-8s %(message)-12s'
        self.formatter = _Formatter(self.fmt)
        self.file = logging.handlers.RotatingFileHandler(
            filename=self.get_log_path(),
            mode='a',
            maxBytes=1000 * 100,
            backupCount=5,
            encoding="utf-8",
            delay=False,
        )
        self.file.setFormatter(self.formatter)
        self.file.setLevel(self.__level_mapping.get(self.level))
        self.logger.addHandler(self.file)

    def set_stream_log(self):
        """Attach a console (stream) handler with a compact format."""
        self.stream = logging.StreamHandler()
        self.stream.setFormatter(_Formatter(
            '<%(levelname)s> %(caller_file_name)s[%(caller_line_number)d] %(message)-12s'))
        self.stream.setLevel(self.__level_mapping.get(self.level))
        self.logger.addHandler(self.stream)

    def get_log_path(self, logs_dir=None):
        """Return today's log file path, creating ``self.logs_dir`` if needed.

        :param logs_dir: unused; kept for backward interface compatibility.
        :return: path of the form ``path_to/logs/level/20200202.log``.
        """
        # Replaces the original os.mkdir / except / os.makedirs fallback:
        # makedirs with exist_ok covers both the nested and existing cases.
        os.makedirs(self.logs_dir, exist_ok=True)
        timestamp = time.strftime("%Y%m%d", time.localtime())
        return os.path.join(self.logs_dir, '%s.log' % timestamp)

    def __getattr__(self, attr):
        # Flatten the logger hierarchy: log.info(msg) instead of
        # log.logger.info(msg).  Only level names and 'exception' delegate.
        if attr in self.__level_list or attr == 'exception':
            return getattr(self.logger, attr)
        raise AttributeError(
            '{0} object has no attribute {1}'.format(self.__class__.__name__, attr))

    def __setattr__(self, attr, value):
        # Plain assignment first (self.attr = value here would recurse).
        self.__dict__[attr] = value
        # Assigning .level re-runs the full update chain.
        if hasattr(self, attr) and attr in ['level']:
            self.update_level(value)

    def __call__(self, msg):
        """``log(msg)`` shorthand: dispatch to the method named by ``self.level``."""
        return getattr(self, self.level)(msg)


# Example usage kept from the original file:
# log = Log(username='nut')
# log = Log(stream=True,)
# log.info('config.BASE_DIR')
# log = Log(level="info")
# log = Log(level="error")
# log = Log(level="critical", stream=True)

Full Screen

Full Screen

test_smart_dispatch.py

Source:test_smart_dispatch.py Github

copy

Full Screen

import os
import unittest
import tempfile
import shutil
from os.path import join as pjoin, abspath
from subprocess import call
from nose.tools import assert_true, assert_equal


class TestSmartdispatcher(unittest.TestCase):
    """End-to-end tests for the ``smart-dispatch`` CLI.

    Each test shells out to the real ``smart-dispatch`` script inside a
    temporary working directory and inspects the SMART_DISPATCH_LOGS folder
    structure it creates.
    """

    def setUp(self):
        self.testing_dir = tempfile.mkdtemp()
        self.logs_dir = os.path.join(self.testing_dir, 'SMART_DISPATCH_LOGS')
        self.folded_commands = 'echo "[1 2 3 4]" "[6 7 8]" "[9 0]"'
        # Cartesian product of the folded arguments above, in unfolding order.
        self.commands = ["echo {} {} {}".format(a, b, c)
                         for a in [1, 2, 3, 4]
                         for b in [6, 7, 8]
                         for c in [9, 0]]
        self.nb_commands = len(self.commands)

        scripts_path = abspath(pjoin(os.path.dirname(__file__), os.pardir, "scripts"))
        self.smart_dispatch_command = '{} -C 1 -q test -t 5:00 -x'.format(pjoin(scripts_path, 'smart-dispatch'))
        self.launch_command = "{0} launch {1}".format(self.smart_dispatch_command, self.folded_commands)
        self.resume_command = "{0} resume {{0}}".format(self.smart_dispatch_command)

        smart_dispatch_command_with_pool = '{} --pool 10 -C 1 -q test -t 5:00 -x {{0}}'.format(pjoin(scripts_path, 'smart-dispatch'))
        self.launch_command_with_pool = smart_dispatch_command_with_pool.format('launch ' + self.folded_commands)
        self.nb_workers = 10

        smart_dispatch_command_with_cores = '{} -C 1 -c {{cores}} -q test -t 5:00 -x {{0}}'.format(pjoin(scripts_path, 'smart-dispatch'))
        self.launch_command_with_cores = smart_dispatch_command_with_cores.format('launch ' + self.folded_commands, cores='{cores}')

        self._cwd = os.getcwd()
        os.chdir(self.testing_dir)

    def tearDown(self):
        os.chdir(self._cwd)
        shutil.rmtree(self.testing_dir)

    def _stage_running_commands(self, path_job_commands):
        """Simulate that every other command is in the running state.

        Moves ``commands[::2]`` into running_commands.txt and leaves
        ``commands[1::2]`` pending.  Returns ``(commands, pending_file,
        running_file)`` where *commands* is the original full pending list.
        """
        pending_commands_file = pjoin(path_job_commands, "commands.txt")
        running_commands_file = pjoin(path_job_commands, "running_commands.txt")
        # BUGFIX: the original read/wrote these files without closing the
        # handles (bare open(...)); context managers close them deterministically.
        with open(pending_commands_file) as fp:
            commands = fp.read().strip().split("\n")
        with open(running_commands_file, 'w') as running_commands:
            running_commands.write("\n".join(commands[::2]) + "\n")
        with open(pending_commands_file, 'w') as pending_commands:
            pending_commands.write("\n".join(commands[1::2]) + "\n")
        return commands, pending_commands_file, running_commands_file

    def _remove_pbs_files(self, path_job_commands):
        """Delete generated job_commands_*.sh so tests can verify new ones appear."""
        for f in os.listdir(path_job_commands):
            if f.startswith('job_commands_') and f.endswith('.sh'):
                os.remove(pjoin(path_job_commands, f))

    def test_main_launch(self):
        # Actual test
        exit_status = call(self.launch_command, shell=True)

        # Test validation
        assert_equal(exit_status, 0)
        assert_true(os.path.isdir(self.logs_dir))
        assert_equal(len(os.listdir(self.logs_dir)), 1)
        batch_uid = os.listdir(self.logs_dir)[0]
        path_job_commands = os.path.join(self.logs_dir, batch_uid, "commands")
        assert_equal(len(os.listdir(path_job_commands)), self.nb_commands + 1)

    def test_launch_using_commands_file(self):
        # Actual test
        commands_filename = "commands_to_run.txt"
        # BUGFIX: the original used open(...).write(...) without closing the
        # handle, which can leave the file unflushed on non-CPython runtimes.
        with open(commands_filename, 'w') as fp:
            fp.write("\n".join(self.commands))
        launch_command = self.smart_dispatch_command + " -f {0} launch".format(commands_filename)
        exit_status = call(launch_command, shell=True)

        # Test validation
        assert_equal(exit_status, 0)
        assert_true(os.path.isdir(self.logs_dir))
        assert_equal(len(os.listdir(self.logs_dir)), 1)
        batch_uid = os.listdir(self.logs_dir)[0]
        path_job_commands = os.path.join(self.logs_dir, batch_uid, "commands")
        assert_equal(len(os.listdir(path_job_commands)), self.nb_commands + 1)
        with open(pjoin(path_job_commands, 'commands.txt')) as fp:
            assert_equal(fp.read(), "\n".join(self.commands) + "\n")

    def test_main_launch_with_pool_of_workers(self):
        # Actual test
        exit_status = call(self.launch_command_with_pool, shell=True)

        # Test validation
        assert_equal(exit_status, 0)
        assert_true(os.path.isdir(self.logs_dir))
        assert_equal(len(os.listdir(self.logs_dir)), 1)
        batch_uid = os.listdir(self.logs_dir)[0]
        path_job_commands = os.path.join(self.logs_dir, batch_uid, "commands")
        assert_equal(len(os.listdir(path_job_commands)), self.nb_workers + 1)

    def test_main_launch_with_cores_command(self):
        # Both 0 and an absurdly large core count must be rejected (exit code 2).
        exit_status_0 = call(self.launch_command_with_cores.format(cores=0), shell=True)
        exit_status_100 = call(self.launch_command_with_cores.format(cores=100), shell=True)

        # Test validation
        assert_equal(exit_status_0, 2)
        assert_equal(exit_status_100, 2)
        assert_true(os.path.isdir(self.logs_dir))

    def test_main_resume(self):
        # Setup
        call(self.launch_command, shell=True)
        # First resume by bare job name, then by full path as the identifier
        # (os.path.join with an absolute second argument yields that path).
        for batch_uid in (os.listdir(self.logs_dir)[0],
                          os.path.join(self.logs_dir, os.listdir(self.logs_dir)[0])):
            path_job_commands = os.path.join(self.logs_dir, batch_uid, "commands")
            commands, pending_commands_file, running_commands_file = \
                self._stage_running_commands(path_job_commands)

            # Actual test (should move running commands back to pending).
            exit_status = call(self.resume_command.format(batch_uid), shell=True)

            # Test validation
            assert_equal(exit_status, 0)
            assert_true(os.path.isdir(self.logs_dir))
            assert_equal(len(os.listdir(self.logs_dir)), 1)
            with open(running_commands_file) as fp:
                assert_equal(len(fp.readlines()), 0)
            with open(pending_commands_file) as fp:
                assert_equal(len(fp.readlines()), len(commands))

    def test_main_resume_by_expanding_pool_default(self):
        # Create SMART_DISPATCH_LOGS structure.
        call(self.launch_command, shell=True)
        batch_uid = os.listdir(self.logs_dir)[0]
        nb_commands_files = 2  # 'commands.txt' and 'running_commands.txt'
        path_job_commands = os.path.join(self.logs_dir, batch_uid, "commands")
        commands, pending_commands_file, running_commands_file = \
            self._stage_running_commands(path_job_commands)
        # Remove PBS files so we can check that new ones are going to be created.
        self._remove_pbs_files(path_job_commands)

        # Should NOT move running commands back to pending but should add new workers.
        command_line = self.resume_command.format(batch_uid) + " --expandPool"
        exit_status = call(command_line, shell=True)

        # Test validation
        assert_equal(exit_status, 0)
        with open(running_commands_file) as fp:
            assert_equal(len(fp.readlines()), len(commands[::2]))
        with open(pending_commands_file) as fp:
            assert_equal(len(fp.readlines()), len(commands[1::2]))
        nb_job_commands_files = len(os.listdir(path_job_commands))
        assert_equal(nb_job_commands_files - nb_commands_files, len(commands[1::2]))

    def test_main_resume_by_expanding_pool(self):
        # Create SMART_DISPATCH_LOGS structure.
        call(self.launch_command, shell=True)
        batch_uid = os.listdir(self.logs_dir)[0]
        nb_commands_files = 2  # 'commands.txt' and 'running_commands.txt'
        path_job_commands = os.path.join(self.logs_dir, batch_uid, "commands")
        commands, pending_commands_file, running_commands_file = \
            self._stage_running_commands(path_job_commands)
        # Remove PBS files so we can check that new ones are going to be created.
        self._remove_pbs_files(path_job_commands)

        # Should NOT move running commands back to pending but should add new workers.
        nb_workers_to_add = 3
        command_line = self.resume_command.format(batch_uid)
        command_line += " --expandPool {}".format(nb_workers_to_add)
        exit_status = call(command_line, shell=True)

        # Test validation
        assert_equal(exit_status, 0)
        with open(running_commands_file) as fp:
            assert_equal(len(fp.readlines()), len(commands[::2]))
        with open(pending_commands_file) as fp:
            assert_equal(len(fp.readlines()), len(commands[1::2]))
        nb_job_commands_files = len(os.listdir(path_job_commands))
        # NOTE(review): the scraped source is truncated here — the final
        # assertion on nb_job_commands_files is missing from the original.

Full Screen

Full Screen

train.py

Source:train.py Github

copy

Full Screen

"""Training entry point: optional MATLAB feature extraction, then train one
VAD model (selected by ``-m``) and freeze its TensorFlow graph.

Usage: train.py -m <mode> -e <extract_feat> --prj_dir <project_dir>
"""
import sys

sys.path.insert(0, './lib/python')  # make the bundled VAD packages importable

import VAD_Proposed as Vp
import VAD_DNN as Vd
import VAD_bDNN as Vb
import VAD_LSTM_2 as Vl
import scipy.io as sio  # kept from the original (unused here; file may be truncated)
import os
import getopt
import shutil
import time  # kept from the original (unused here; file may be truncated)
import graph_save as gs
import path_setting as ps

# norm_dir = "./norm_data"
# data_dir = "./sample_data"
# ckpt_name = '/model9918and41.ckpt-2'
# model_dir = "./saved_model"
# valid_batch_size = 4134

# mode number -> (model name, training module, output node names for freezing).
# Collapses the four copy-pasted per-mode blocks of the original.
_MODES = {
    0: ('ACAM', Vp, 'model_1/logits,model_1/raw_labels'),
    1: ('bDNN', Vb, 'model_1/logits,model_1/labels'),
    2: ('DNN', Vd, 'model_1/soft_pred,model_1/raw_labels'),
    3: ('LSTM', Vl, 'model_1/soft_pred,model_1/raw_labels'),
}


def _reset_dir(path):
    """Recreate *path* as an empty directory (replaces `rm -rf` + `mkdir`)."""
    shutil.rmtree(path, ignore_errors=True)
    os.makedirs(path, exist_ok=True)


if __name__ == '__main__':
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hm:e:', ["prj_dir="])
    except getopt.GetoptError as err:
        print(str(err))
        sys.exit(1)
    if len(opts) != 3:
        print("arguments are not enough.")
        sys.exit(1)
    for opt, arg in opts:
        if opt == '-h':
            sys.exit(0)
        elif opt == '-m':
            mode = int(arg)
        elif opt == '-e':
            extract_feat = int(arg)
        elif opt == '--prj_dir':
            prj_dir = str(arg)

    data_dir = prj_dir + '/data/raw'
    train_data_dir = data_dir + '/train'
    valid_data_dir = data_dir + '/valid'
    save_dir = prj_dir + '/data/feat'
    train_save_dir = save_dir + '/train'
    valid_save_dir = save_dir + '/valid'

    if extract_feat:
        # Rebuild the feature directory from scratch (was os.system rm/mkdir,
        # which silently ignored failures).
        _reset_dir(save_dir)
        os.makedirs(train_save_dir, exist_ok=True)
        os.makedirs(valid_save_dir, exist_ok=True)
        # MATLAB feature extraction; kept as shell calls since the quoting is
        # MATLAB-specific.
        os.system(
            "matlab -r \"try acoustic_feat_ex(\'%s\',\'%s\'); catch; end; quit\"" % (train_data_dir, train_save_dir))
        os.system(
            "matlab -r \"try acoustic_feat_ex(\'%s\',\'%s\'); catch; end; quit\"" % (valid_data_dir, valid_save_dir))

    train_norm_dir = save_dir + '/train/global_normalize_factor.mat'
    test_norm_dir = prj_dir + '/norm_data/global_normalize_factor.mat'
    # was: os.system("cp ...") — shutil.copy fails loudly instead of silently.
    shutil.copy(train_norm_dir, test_norm_dir)

    if mode in _MODES:
        name, module, output_nodes = _MODES[mode]
        set_path = ps.PathSetting(prj_dir, name)
        logs_dir = set_path.logs_dir
        _reset_dir(logs_dir + '/train')
        _reset_dir(logs_dir + '/valid')
        module.main(prj_dir, name, 'train')
        gs.freeze_graph(prj_dir + '/logs/' + name,
                        prj_dir + '/saved_model/graph/' + name,
                        output_nodes)

Full Screen

Full Screen

npy_handler.py

Source:npy_handler.py Github

copy

Full Screen

"""Ad-hoc analysis of saved .npy training results.

The original script toggled each analysis with ``for a in range(0):`` (never
runs) versus ``for a in range(1):`` (runs once).  The disabled analyses are
kept below as never-called private functions; the enabled one runs at module
level so its bindings remain available, matching the original behavior.
"""
import numpy as np
import matplotlib.pyplot as plt


def smooth(x, timestamps=9):
    """Trailing moving average: y[i] = mean(x[max(0, i-timestamps) : i+1]).

    Each output averages the current sample with up to *timestamps* preceding
    samples (fewer near the start of the array).
    """
    n = len(x)
    y = np.zeros(n)
    for i in range(n):
        start = max(0, i - timestamps)
        y[i] = float(x[start:(i + 1)].sum()) / (i - start + 1)
    return y


def _analyze_observations():
    """Observation statistics (disabled in the original via ``range(0)``)."""
    logs_dir = '../Data/results/'
    ob_ls = np.load(logs_dir + '{}'.format('ob_ls') + '.npy')
    ob_ls = np.squeeze(ob_ls)
    ob_mean = np.mean(ob_ls, axis=0)
    ob_std = np.std(ob_ls, axis=0)
    ob_max = np.max(ob_ls, axis=0)
    ob_min = np.min(ob_ls, axis=0)
    print("")
    print("ob_mean:", ob_mean)
    print("")
    print("ob_std:", ob_std)
    # Hard-coded reference statistics from an earlier run.
    obs_mean = np.array([4.83868269e+01, 9.26671424e+01, 6.41770269e+02, -3.11372911e+02,
                         6.78844516e-02, 1.27067008e-02, 1.46767778e+02])
    obs_std = np.array([9.87342636e+00, 1.13743122e+02, 1.30954515e+02, 3.87827102e+02,
                        5.11422102e-02, 6.47789938e-02, 2.07343922e+02])
    ob_test = ob_ls[110:120, :]
    obs_normalized = (ob_test - obs_mean) / obs_std


# reward original analysis — the one block that was enabled in the original
# (``for a in range(1):``); inlined at module level, same bindings as before.
logs_dir = '../Data/results/'
reward = np.load(logs_dir + '{}'.format('reward_np') + '.npy')
reward_mean = np.mean(reward, axis=0)
reward_std = np.std(reward, axis=0)
print("")
print("reward_mean:", reward_mean)
print("")
print("reward_std:", reward_std)
r_mean = -6707.54
r_std = 7314.15
r = reward / r_std
reward_sum = np.sum(reward)  # -25595967.36
r_sum = np.sum(r)  # r_sum = -3499.51
print("")


def _analyze_reward_random_actions():
    """Reward statistics under random actions (disabled in the original)."""
    logs_dir = '../Data-13/results/'
    reward = np.load(logs_dir + '{}'.format('reward_ls') + '.npy')
    reward_mean = np.mean(reward, axis=0)
    reward_std = np.std(reward, axis=0)
    r_mean = -6755.76
    r_std = 7331.19
    r = reward / r_std
    reward_sum = np.sum(reward)  # -25779975.89
    r_sum = np.sum(r)  # r_sum = -3524
    print("")


def _analyze_speed_tracking():
    """Target-speed tracking error (disabled in the original)."""
    logs_dir = '../Data-13/results/'
    v_mph = np.load(logs_dir + '{}'.format('v_mph_ls') + '.npy')
    target_speed = np.load(logs_dir + '{}'.format('target_speed_ls') + '.npy')
    spd_err = target_speed - v_mph
    print("")


def _plot_engine_torque():
    """Plot original vs smoothed engine torque (disabled in the original)."""
    logs_dir = '../Data-1/results/'
    tHist = np.load(logs_dir + '{}'.format('tHist') + '.npy')
    eng_org_ls = np.load(logs_dir + '{}'.format('eng_org_ls') + '.npy')
    eng_new_ls = np.load(logs_dir + '{}'.format('eng_new_ls') + '.npy')
    fig = plt.figure()
    fig1, = plt.plot(tHist[0], eng_org_ls[0], color='red', linewidth=1, label='eng ori')
    fig2, = plt.plot(tHist[0], smooth(eng_new_ls[0]), color='b', linewidth=1, label='eng new')
    # engine torque
    plt.xlim(0, 800)
    plt.ylim(-100, 200)
    plt.ylabel("Engine Torque")
    plt.xlabel("Time(s)")
    plt.title("Engine Torque")
    plt.legend()
    plt.show()


def _check_soc():
    """Inspect SOC values (disabled in the original).

    NOTE(review): the scraped source is truncated here; anything after the
    np.load call is missing from the original.
    """
    logs_dir = '../Data-1/results/'
    SOC = np.load(logs_dir + '{}'.format('SOC') + '.npy')

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks, such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Slash automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation testing minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful