How to use the save_data_as method in SeleniumBase

Best Python code snippet using SeleniumBase

train_conv3d-checkpoint.py

Source: train_conv3d-checkpoint.py (GitHub)


# imports
import warnings
warnings.filterwarnings('ignore')
import os, cv2, shutil, json
import numpy as np, pandas as pd, pickle as pkl
from glob import glob
from time import time
from datetime import datetime
from tqdm import tqdm
from sklearn.metrics import precision_recall_curve, average_precision_score, accuracy_score
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.model_selection import train_test_split
from keras.applications.mobilenet import MobileNet
from keras.models import Model, load_model as K_load_model
from keras.layers import LSTM, Dense, InputLayer
from keras.callbacks import ModelCheckpoint
from c3d_model import c3d_model


class DataHandler:
    '''
    Handles all operations with respect to data
    '''
    def __init__(self, videos_path, test_size=0.05):
        '''
        Initializes the class variables for data handling
        '''
        self.n_frames = 16
        self.operating_resolution = (224, 224)
        self.test_split = test_size
        self.videos_path = videos_path
        self.image_feature_extractor = self.get_mobilenet_feature_extractor()

    def get_mobilenet_feature_extractor(self):
        '''
        Returns the MobileNet feature extractor
        '''
        mobilenet = MobileNet()
        return Model(inputs=mobilenet.inputs, outputs=mobilenet.get_layer("global_average_pooling2d_1").output)

    def sample_frames(self, video_path):
        '''
        Gets 'n' frames, each of resolution 'w' x 'h' with 3 channels (RGB), from a video
        Uses equidistant sampling of frames
        '''
        cap = cv2.VideoCapture(video_path)
        read_count = 1
        frames_list = list()
        frame_total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        while cap.isOpened():
            isRead, frame = cap.read()
            if not isRead: break
            if read_count % (int(frame_total / self.n_frames) - 1) == 0:
                frame = cv2.resize(frame, self.operating_resolution)
                frames_list.append(frame)
            read_count += 1
            if len(frames_list) == self.n_frames: break
        return np.array(frames_list[:self.n_frames])

    def get_frame_features(self, frames):
        '''
        Returns features for each frame
        '''
        return np.squeeze(self.image_feature_extractor.predict(frames))

    def extract_video_features(self, video_file):
        '''
        Returns an array of frame features for a video
        '''
        frames = self.sample_frames(video_file)
        return self.get_frame_features(frames)

    def prepare_training_data(self, videos_path):
        '''
        Returns data and labels for all videos in a directory
        '''
        folders = sorted(os.listdir(videos_path))
        classes = dict([(folder, idx) for idx, folder in enumerate(folders)])
        n_classes = len(classes)
        frame_features = list()
        labels = list()
        videos_list = list()
        for folder in folders:
            folder_path = os.path.join(self.videos_path, folder)
            video_files = sorted(glob(os.path.join(folder_path, "*")))
            for video_file in video_files:
                frame_features.append(self.extract_video_features(video_file))
                labels.append(classes[folder])
                videos_list.append(video_file)
        return np.array(frame_features), np.array(labels), np.array(videos_list), classes

    def get_training_data(self, save_data_as=None, data_pickle=None):
        '''
        Prepares the preprocessed training data and labels
        '''
        if data_pickle == None:
            if save_data_as == None: save_data_as = "data.pkl"
            if ".pkl" not in save_data_as: save_data_as += ".pkl"
            X, y, video_list, classes = self.prepare_training_data(self.videos_path)
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_split, random_state=42)
            pkl.dump({"X_train": X_train, "y_train": y_train, "X_test": X_test, "y_test": y_test, "classes": classes, "videos": video_list}, open(save_data_as, "wb"))
        else:
            data_dict = pkl.load(open(data_pickle, "rb"))
            X_train, y_train, X_test, y_test, video_list, classes = data_dict["X_train"], data_dict["y_train"], data_dict["X_test"], data_dict["y_test"], data_dict["videos"], data_dict["classes"]
        return X_train, X_test, y_train, y_test, video_list, classes


class Trainer(DataHandler):
    '''
    Handles all the training operations
    '''
    def __init__(self, videos_path, data_to_use=None, operating_resolution=(224, 224)):
        '''
        Initializes the training class variables
        '''
        DataHandler.__init__(self, videos_path)
        self.operating_resolution = operating_resolution
        self.training_version = str(datetime.now())[:16].replace("-", "_").replace(" ", "_")
        os.mkdir(self.training_version)
        save_data_as = None
        if data_to_use == None:
            save_data_as = os.path.join(self.training_version, "data.pkl")
        self.X_train, self.X_test, self.y_train, self.y_test, self.videos, self.classes = self.get_training_data(save_data_as=save_data_as, data_pickle=data_to_use)
        self.n_classes = len(self.classes)
        # training params
        self.epochs = 50
        self.batch_size = 32

    def train(self, pretrained_model=None, model_path=None):
        '''
        Runs the training
        '''
        if pretrained_model != None: self.c3d_model = K_load_model(pretrained_model)
        else: self.c3d_model = c3d_model(resolution=self.operating_resolution, n_frames=16, channels=3, nb_classes=3)
        if model_path == None: model_path = "C3D_E{epoch:02d}_VA{val_accuracy:.2f}.hdf5"
        model_path = os.path.join(self.training_version, model_path)
        callbacks = [ModelCheckpoint(model_path, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')]

        self.c3d_model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
        ...


train_conv3d.py

Source: train_conv3d.py (GitHub)


# imports
import warnings
warnings.filterwarnings('ignore')
import os, cv2, shutil, json
import numpy as np, pandas as pd, pickle as pkl
from glob import glob
from time import time
from datetime import datetime
from tqdm import tqdm
from sklearn.metrics import precision_recall_curve, average_precision_score, accuracy_score
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.model_selection import train_test_split
from keras.applications.mobilenet import MobileNet
from keras.models import Model, load_model as K_load_model
from keras.layers import LSTM, Dense, InputLayer
from keras.callbacks import ModelCheckpoint
from c3d_model import c3d_model


class DataHandler:
    '''
    Handles all operations with respect to data
    '''
    def __init__(self, videos_path="/mnt/E2F262F2F262C9FD/PROJECTS/media_retrieval/Datasets/KTH/train/", test_size=0.05):
        '''
        Initializes the class variables for data handling
        '''
        self.n_frames = 16
        self.operating_resolution = (224, 224)
        self.test_split = test_size
        self.videos_path = videos_path

    def sample_frames(self, video_path):
        '''
        Gets 'n' frames, each of resolution 'w' x 'h' with 3 channels (RGB), from a video
        Uses equidistant sampling of frames
        '''
        cap = cv2.VideoCapture(video_path)
        read_count = 1
        frames_list = list()
        frame_total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        while cap.isOpened():
            isRead, frame = cap.read()
            if not isRead: break
            if read_count % (int(frame_total / self.n_frames) - 1) == 0:
                frame = cv2.resize(frame, self.operating_resolution)
                frames_list.append(frame)
            read_count += 1
            if len(frames_list) == self.n_frames: break
        return np.array(frames_list[:self.n_frames])

    def extract_video_features(self, video_file):
        '''
        Returns an array of frame features for a video
        '''
        return self.sample_frames(video_file)

    def prepare_training_data(self, videos_path):
        '''
        Returns data and labels for all videos in a directory
        '''
        folders = sorted(os.listdir(videos_path))
        classes = dict([(folder, idx) for idx, folder in enumerate(folders)])
        n_classes = len(classes)
        frame_features = list()
        labels = list()
        videos_list = list()
        for folder in folders:
            folder_path = os.path.join(self.videos_path, folder)
            video_files = sorted(glob(os.path.join(folder_path, "*")))
            for video_file in video_files:
                frame_features.append(self.extract_video_features(video_file))
                labels.append(classes[folder])
                videos_list.append(video_file)
        return np.array(frame_features), np.array(labels), np.array(videos_list), classes

    def get_training_data(self, save_data_as=None, data_pickle=None):
        '''
        Prepares the preprocessed training data and labels
        '''
        if data_pickle == None:
            if save_data_as == None: save_data_as = "data.pkl"
            if ".pkl" not in save_data_as: save_data_as += ".pkl"
            X, y, video_list, classes = self.prepare_training_data(self.videos_path)
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_split, random_state=42)
            pkl.dump({"X_train": X_train, "y_train": y_train, "X_test": X_test, "y_test": y_test, "classes": classes, "videos": video_list}, open(save_data_as, "wb"))
        else:
            data_dict = pkl.load(open(data_pickle, "rb"))
            X_train, y_train, X_test, y_test, video_list, classes = data_dict["X_train"], data_dict["y_train"], data_dict["X_test"], data_dict["y_test"], data_dict["videos"], data_dict["classes"]
        return X_train, X_test, y_train, y_test, video_list, classes


class Trainer(DataHandler):
    '''
    Handles all the training operations
    '''
    def __init__(self, data_to_use="/mnt/E2F262F2F262C9FD/PROJECTS/media_retrieval/training/Conv3D/2020_04_24_19:06/data.pkl", operating_resolution=(224, 224)):
        '''
        Initializes the training class variables
        '''
        DataHandler.__init__(self)
        self.operating_resolution = operating_resolution
        self.training_version = str(datetime.now())[:16].replace("-", "_").replace(" ", "_")
        os.mkdir(self.training_version)
        save_data_as = None
        if data_to_use == None:
            save_data_as = os.path.join(self.training_version, "data.pkl")
        self.X_train, self.X_test, self.y_train, self.y_test, self.videos, self.classes = self.get_training_data(save_data_as=save_data_as, data_pickle=data_to_use)
        self.n_classes = len(self.classes)
        # training params
        self.epochs = 50
        self.batch_size = 32

    def train(self, pretrained_model=None, model_path=None):
        '''
        Runs the training
        '''
        if pretrained_model != None: self.c3d_model = K_load_model(pretrained_model)
        else: self.c3d_model = c3d_model(resolution=self.operating_resolution, n_frames=16, channels=3, nb_classes=3)
        if model_path == None: model_path = "C3D_E{epoch:02d}_VA{val_accuracy:.2f}.hdf5"
        model_path = os.path.join(self.training_version, model_path)
        callbacks = [ModelCheckpoint(model_path, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')]

        self.c3d_model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        self.c3d_model.fit(self.X_train, self.y_train, epochs=self.epochs, batch_size=self.batch_size, validation_split=self.test_split, shuffle=True, verbose=2, callbacks=callbacks)


if __name__ == '__main__':
    tr = Trainer()
    ...
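In this full training script, save_data_as only comes into play when data_to_use is None: get_training_data then extracts features and writes them to <training_version>/data.pkl before training starts. A short sketch of both modes, assuming the default KTH videos directory from the script is actually reachable:

# First run: no cached pickle, so features are extracted and saved via save_data_as
tr = Trainer(data_to_use=None)
tr.train()

# A later run can skip extraction by pointing data_to_use at the cached pickle, e.g.:
# tr = Trainer(data_to_use="2020_04_24_19:06/data.pkl"); tr.train()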


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run SeleniumBase automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
