How to use the get_class_names method in lisa

Best Python code snippets using the lisa Python library

gideon_cloud.py

Source:gideon_cloud.py Github

copy

Full Screen

############################### INFERENCE USING KERAS MODEL ###############################

# def get_class_names(path):
def get_class_names():
    """Return the ordered class labels that the model's output indexes map to.

    Index 0 -> 'abnormal', index 1 -> 'normal' (matches pred_npy[0][0]/[0][1]
    usage in classify_video below).
    """
    class_names = ['abnormal', 'normal']
    # with open(path) as f:
    #     for row in f:
    #         class_names.append(row[:-1])
    # BUG FIX: the original left `return class_names` commented out, so the
    # function returned None and `class_names = get_class_names()` produced
    # None, crashing any later indexing.
    return class_names


# def classify_video(video_path, net_path):
def classify_video(video_path, output_path):
    """Run Conv3D inference on 16-frame batches of a video and write an annotated copy.

    video_path  -- path or device index accepted by cv.VideoCapture
    output_path -- destination path for the H264-encoded annotated video

    Relies on module-level names defined elsewhere in the file: `cv` (OpenCV),
    `np` (NumPy), and the loaded Keras `model` — TODO confirm against the
    imports at the top of the full file.
    """
    SAMPLE_DURATION = 16   # frames per batch fed to the network for inference
    SAMPLE_SIZE = 112
    # mean = (114.7748, 107.7354, 99.4750)
    # class_names = get_class_names(args.classes)
    class_names = get_class_names()

    # Image/input characteristics:
    img_height = 100
    img_width = 100
    channels = 3
    batch_size = 16

    cap = cv.VideoCapture(video_path)
    writer = None
    (W, H) = (None, None)

    # Loop over frames from the video file stream, 16 frames per iteration.
    frames_set = 0
    finished = False
    while not finished and cv.waitKey(1) < 0:
        # Construct one batch of video frames for Conv3D input.
        frames = []
        frames_copy = []
        frames_count = 0

        for _ in range(SAMPLE_DURATION):
            hasFrame, frame = cap.read()
            if not hasFrame:
                # BUG FIX: the original called exit(0) here, terminating the
                # process without releasing the writer/capture (leaving a
                # possibly corrupt output file). Break out and clean up below.
                finished = True
                break

            # Clone the output frame before it is resized/normalised.
            output = frame.copy()
            frames_copy.append(output)

            # If the frame dimensions are empty, grab them once.
            if W is None or H is None:
                (H, W) = frame.shape[:2]

            frame = cv.resize(frame, (100, 100))
            frame = frame / 255
            frames.append(frame)
            frames_count += 1

        if frames_count < SAMPLE_DURATION:
            # Incomplete trailing batch: cannot reshape to (-1, 16, ...),
            # so skip prediction (the original exited the process instead).
            break

        frames_set += 1
        print("16-Frames Set# ", frames_set)
        print('Frames Count in processed set: ', frames_count)

        # Performing prediction on the batch of 16 frames:
        frames = np.array(frames).reshape(-1, 16, 100, 100, 3)
        pred_npy = model.predict(frames, batch_size=batch_size, verbose=0)
        pred = pred_npy.argmax(axis=1)

        threshold = 0.70
        if pred_npy[0][0] >= threshold:
            event = 'Abnormal'
        else:
            event = 'Normal'
        print('Probability (Abnormal):', pred_npy[0][0])
        print('Probability (Normal):', pred_npy[0][1])
        print('Prediction: >> ', event)

        # Annotate each original-resolution frame and write it to disk.
        for frame in frames_copy:
            text = "Event: {}".format(event)
            cv.putText(frame, text, (20, 20), cv.FONT_HERSHEY_DUPLEX, 0.50, (0, 255, 0), 2)
            text = "Abnormal Probability: {0:0.2f}".format(pred_npy[0][0])
            cv.putText(frame, text, (20, 40), cv.FONT_HERSHEY_SIMPLEX, 0.50, (0, 255, 0), 2)
            text = "Normal Probability: {0:0.2f}".format(pred_npy[0][1])
            cv.putText(frame, text, (20, 60), cv.FONT_HERSHEY_SIMPLEX, 0.50, (0, 255, 0), 2)

            # Lazily initialize the video writer on the first frame.
            if writer is None:
                fourcc = cv.VideoWriter_fourcc(*"H264")
                writer = cv.VideoWriter(output_path, fourcc, 30, (W, H), True)
            writer.write(frame)

    # BUG FIX: in the original this cleanup was unreachable (the loop only
    # ever ended via exit(0)); release resources on the normal path and guard
    # against a writer that was never created.
    print("[INFO] cleaning up system resources...")
    if writer is not None:
        writer.release()
    cap.release()


if __name__ == "__main__":
    # `parser` is defined earlier in the file (not visible in this chunk) —
    # TODO confirm it defines --input and --output arguments.
    args, _ = parser.parse_known_args()
    classify_video(args.input if args.input else 0, args.output)

# NOTE(review): the scraped source appended a second, truncated copy of this
# file (a variant whose get_class_names() returned ['None', 'Abnormal',
# 'Normal'] and whose classify_video(video_path) displayed frames in a
# cv.namedWindow instead of writing a file). That copy is cut off
# mid-function in the source and is omitted here rather than guessed at.

Full Screen

Full Screen

debug_list.py

Source:debug_list.py Github

copy

Full Screen

# Batch-validate and upload candidate data files.
# (Numerous one-off exploratory calls to validate/dbio/file_io from earlier
# debugging sessions were commented out in the original and are omitted here
# for readability.)

# Files already processed (or known-problematic) that this run should skip.
exclude_files = [
    '6_MIP_YSTAFDB_MetalUseShares_v1.0.xlsx',   # Done
    '4_PY_YSTAFDB_EoL_RecoveryRate_v1.0.xlsx',  # Done
    # '6_URB_MetabolismOfCities_Jan2019_DOI_7326485.v1.xlsx'  # TODO
]

path = IEDC_paths.candidates
# When non-empty, restrict the run to exactly these filenames.
focus = []

for file in tqdm(file_io.get_candidate_filenames(path, verbose=1)):
    if file in exclude_files:
        print("Skipping %s" % file)
        continue
    if len(focus) > 0 and file not in focus:
        continue
    try:
        # Read metadata first, then derive the aspects/classification tables.
        file_meta = file_io.read_candidate_meta(file, path=path)
        aspects_table = validate.create_aspects_table(file_meta)
        class_names = validate.get_class_names(file_meta, aspects_table)
        file_data = file_io.read_candidate_data_list(file, path)
        # Create DB classification definitions/items only when missing.
        if not all(validate.check_classification_definition(class_names, crash=False, warn=False)):
            validate.create_db_class_defs(file_meta, aspects_table)
        if not all(validate.check_classification_items(class_names, file_meta, file_data, crash=False, warn=False)):
            validate.create_db_class_items(file_meta, aspects_table, file_data)
        validate.add_user(file_meta, quiet=True)
        validate.add_license(file_meta, quiet=True)
        validate.check_datasets_entry(file_meta, crash_on_exist=False, create=True, update=False, replace=True)
        validate.upload_data_list(file_meta, aspects_table, file_data, crash=False)
    except BaseException as e:
        # NOTE(review): BaseException also swallows KeyboardInterrupt and
        # SystemExit — `except Exception` is usually intended. The source is
        # truncated right after this print, so the handler may continue
        # (e.g. printing the traceback) beyond this view; left as-is.
        print("ERROR: File '%s' caused an issue. See stack." % file)

Full Screen

Full Screen

main.py

Source:main.py Github

copy

Full Screen

from api.core.cnn.cnn_model import CnnModel

# (Earlier import lines of this file are truncated in the source; the code
# below also relies on FastAPI, File, Image, and io being imported above.)

app = FastAPI()
cnn = CnnModel()


@app.get("/cnn/class/names")
def get_class_names():
    """Return the class labels known to the CNN model."""
    return cnn.get_class_names()


@app.post("/cnn/classify")
async def classify(file: bytes = File(...)):
    """Classify an uploaded image with the CNN and return its prediction."""
    image = Image.open(io.BytesIO(file))
    return cnn.classify(image)


# NOTE(review): the source also declares
#     @app.get("/cnn/exists/label/{label}")
#     def exists_label(label):
# but its body is truncated in this chunk, so it is not reproduced here.

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run lisa automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful