How to use the list_images method in tempest

Best Python code snippets using tempest_python
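Note that in the example files below, list_images is an ordinary Python variable holding a list of image arrays. In tempest itself, list_images is a method exposed by the image service clients. The following is a minimal, hedged sketch of calling it from a tempest image API test; the test class name and assertion are illustrative, and the exact client wiring may differ between tempest versions.

from tempest.api.image import base


class ImagesSmokeTest(base.BaseV2ImageTest):
    """Illustrative test; assumes tempest's image v2 base class wires
    self.client to the image v2 images client."""

    def test_list_images(self):
        # GET /v2/images; the response body's 'images' key holds the list
        images = self.client.list_images()['images']
        self.assertIsInstance(images, list)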

augmentation.py

Source: augmentation.py (GitHub)



import os
import random

import cv2
import numpy as np
import SimpleITK as sitk
import torch


# Read in all images for a patient as a dict of np arrays,
# always in shape C*Z*H*W (channel, slice, height, width)
def read_data(patient_dir):
    dict_images = {}
    list_structures = ['CT', 'PTV70', 'PTV63', 'PTV56',
                       'possible_dose_mask', 'Brainstem', 'SpinalCord',
                       'RightParotid', 'LeftParotid', 'Esophagus',
                       'Larynx', 'Mandible', 'dose']

    for structure_name in list_structures:
        structure_file = patient_dir + '/' + structure_name + '.nii.gz'

        if structure_name == 'CT':
            dtype = sitk.sitkInt16
        elif structure_name == 'dose':
            dtype = sitk.sitkFloat32
        else:
            dtype = sitk.sitkUInt8

        if os.path.exists(structure_file):
            dict_images[structure_name] = sitk.ReadImage(structure_file, dtype)
            # To numpy array (C * Z * H * W)
            dict_images[structure_name] = sitk.GetArrayFromImage(dict_images[structure_name])[np.newaxis, :, :, :]
        else:
            dict_images[structure_name] = np.zeros((1, 128, 128, 128), np.uint8)

    return dict_images


def preprocess_image(dict_images):
    # PTVs, weighted by prescription dose relative to 70 Gy
    PTVs = 70.0 / 70. * dict_images['PTV70'] \
           + 63.0 / 70. * dict_images['PTV63'] \
           + 56.0 / 70. * dict_images['PTV56']

    # OARs
    OAR_names = ['Brainstem', 'SpinalCord', 'RightParotid', 'LeftParotid',
                 'Esophagus', 'Larynx', 'Mandible']
    OAR_all = np.concatenate([dict_images[OAR] for OAR in OAR_names], axis=0)

    # CT image
    CT = dict_images['CT']
    CT = np.clip(CT, a_min=-1024, a_max=1500)
    CT = CT.astype(np.float32) / 1000.

    # Dose image
    dose = dict_images['dose'] / 70.

    # possible_dose_mask: the region that can receive dose
    possible_dose_mask = dict_images['possible_dose_mask']

    list_images = [np.concatenate((PTVs, OAR_all, CT), axis=0),  # Input
                   dose,  # Label
                   possible_dose_mask]
    return list_images


def random_flip_3d(list_images, list_axis=(0, 1, 2), p=0.5):
    if random.random() <= p:
        if 0 in list_axis:
            if random.random() <= 0.5:
                for image_i in range(len(list_images)):
                    list_images[image_i] = list_images[image_i][:, ::-1, :, :].copy()
        if 1 in list_axis:
            if random.random() <= 0.5:
                for image_i in range(len(list_images)):
                    list_images[image_i] = list_images[image_i][:, :, ::-1, :].copy()
        if 2 in list_axis:
            if random.random() <= 0.5:
                for image_i in range(len(list_images)):
                    list_images[image_i] = list_images[image_i][:, :, :, ::-1].copy()
    return list_images


# Random rotation using OpenCV
def random_rotate_around_z_axis(list_images,
                                list_angles,
                                list_interp,
                                list_border_value,
                                p=0.5):
    if random.random() <= p:
        # Randomly pick an angle from list_angles
        _angle = random.sample(list_angles, 1)[0]
        # Do not use random scaling; set the scale factor to 1
        _scale = 1.

        for image_i in range(len(list_images)):
            for chan_i in range(list_images[image_i].shape[0]):
                for slice_i in range(list_images[image_i].shape[1]):
                    rows, cols = list_images[image_i][chan_i, slice_i, :, :].shape
                    M = cv2.getRotationMatrix2D(((cols - 1) / 2.0, (rows - 1) / 2.0), _angle, scale=_scale)
                    list_images[image_i][chan_i, slice_i, :, :] = \
                        cv2.warpAffine(list_images[image_i][chan_i, slice_i, :, :],
                                       M,
                                       (cols, rows),
                                       borderMode=cv2.BORDER_CONSTANT,
                                       borderValue=list_border_value[image_i],
                                       flags=list_interp[image_i])
    return list_images


# Random translation
def random_translate(list_images, roi_mask, p, max_shift, list_pad_value):
    if random.random() <= p:
        exist_mask = np.where(roi_mask > 0)
        ori_z, ori_h, ori_w = list_images[0].shape[1:]

        bz = min(max_shift - 1, np.min(exist_mask[0]))
        ez = max(ori_z - 1 - max_shift, np.max(exist_mask[0]))
        bh = min(max_shift - 1, np.min(exist_mask[1]))
        eh = max(ori_h - 1 - max_shift, np.max(exist_mask[1]))
        bw = min(max_shift - 1, np.min(exist_mask[2]))
        ew = max(ori_w - 1 - max_shift, np.max(exist_mask[2]))

        for image_i in range(len(list_images)):
            list_images[image_i] = list_images[image_i][:, bz:ez + 1, bh:eh + 1, bw:ew + 1]

        # Pad back to the original size
        list_images = random_pad_to_size_3d(list_images,
                                            target_size=[ori_z, ori_h, ori_w],
                                            list_pad_value=list_pad_value)
    return list_images


# To tensor; images should be C*Z*H*W
def to_tensor(list_images):
    for image_i in range(len(list_images)):
        list_images[image_i] = torch.from_numpy(list_images[image_i].copy()).float()
    return list_images


# Pad each image to target_size, splitting the padding randomly
# between the two sides of each axis
def random_pad_to_size_3d(list_images, target_size, list_pad_value):
    _, ori_z, ori_h, ori_w = list_images[0].shape[:]
    new_z, new_h, new_w = target_size[:]

    pad_z = new_z - ori_z
    pad_h = new_h - ori_h
    pad_w = new_w - ori_w

    pad_z_1 = random.randint(0, pad_z)
    pad_h_1 = random.randint(0, pad_h)
    pad_w_1 = random.randint(0, pad_w)

    pad_z_2 = pad_z - pad_z_1
    pad_h_2 = pad_h - pad_h_1
    pad_w_2 = pad_w - pad_w_1

    output = []
    for image_i in range(len(list_images)):
        _image = list_images[image_i]
        output.append(np.pad(_image,
                             ((0, 0), (pad_z_1, pad_z_2), (pad_h_1, pad_h_2), (pad_w_1, pad_w_2)),
                             mode='constant',
                             constant_values=list_pad_value[image_i]))
    return output
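A hedged end-to-end sketch of chaining these helpers for a single patient follows. The patient directory, angle list, interpolation flags, and shift size are illustrative values, not ones prescribed by the module; images 0 and 1 are float volumes (linear interpolation) and image 2 is a binary mask (nearest-neighbour).

# Illustrative pipeline; 'Data/patient_01' is a hypothetical directory
# containing the expected .nii.gz files.
dict_images = read_data('Data/patient_01')
list_images = preprocess_image(dict_images)  # [input, dose, possible_dose_mask]

list_images = random_flip_3d(list_images, list_axis=(0, 1, 2), p=0.5)
list_images = random_rotate_around_z_axis(list_images,
                                          list_angles=(0, 40, 80, 120, 160, 200, 240, 280, 320),
                                          list_interp=(cv2.INTER_LINEAR, cv2.INTER_LINEAR, cv2.INTER_NEAREST),
                                          list_border_value=(0, 0, 0),
                                          p=0.3)
list_images = random_translate(list_images,
                               roi_mask=list_images[2][0],  # mask volume without the channel axis
                               p=0.8,
                               max_shift=20,
                               list_pad_value=[0, 0, 0])
input_, dose, mask = to_tensor(list_images)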


cs231_filters.py

Source: cs231_filters.py (GitHub)



import cv2
import matplotlib.pyplot as plt
import numpy as np
from skimage import exposure


def Sharp_img(img):
    kernel3 = np.array([[-1, -1, -1],
                        [-1, 9, -1],
                        [-1, -1, -1]])
    res = cv2.filter2D(src=img, ddepth=-1, kernel=kernel3)
    return res


def Gray_img(img):
    res = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return res


def Bw_img(img, t):
    img_gray_sharp = cv2.cvtColor(Sharp_img(img), cv2.COLOR_BGR2GRAY)
    max_value = np.max(img_gray_sharp)
    # Rescale t from a 0-255 range to the actual intensity range of the image
    t = t * max_value / 255
    # t = np.average(img_gray_sharp) - 25
    thresh, res = cv2.threshold(img_gray_sharp, t, 255, cv2.THRESH_BINARY)
    return res


def Equal_img(img, grid=None):
    # Apply CLAHE to each BGR channel independently
    b, g, r = cv2.split(img)
    if grid is not None:
        clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(grid, grid))
    else:
        clahe = cv2.createCLAHE(clipLimit=3.0)
    clahe_b = clahe.apply(b)
    clahe_g = clahe.apply(g)
    clahe_r = clahe.apply(r)
    res = cv2.merge((clahe_b, clahe_g, clahe_r))
    return res


def Gray_equal_img(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    res = clahe.apply(img_gray)
    return res


def Contrast_stretching_img(img):
    p2, p98 = np.percentile(img, (2, 98))
    res = exposure.rescale_intensity(img, in_range=(p2, p98))
    return res


def Gray_Contrast_stretching_img(img):
    img = cv2.cvtColor(Sharp_img(img), cv2.COLOR_BGR2GRAY)
    p2, p98 = np.percentile(img, (2, 98))
    res = exposure.rescale_intensity(img, in_range=(p2, p98))
    return res


######################################################
# TESTING
if __name__ == '__main__':
    List_images = []

    # Original image
    _img = cv2.imread("IMG_5530.jpg")
    List_images.append(_img)

    # Origin -> sharpened image
    img_sharp = Sharp_img(_img)
    List_images.append(img_sharp)

    # Origin -> grayscale image
    img_gray = Gray_img(_img)
    List_images.append(img_gray)

    # Sharp -> B&W image (threshold value is illustrative)
    img_bw = Bw_img(_img, 128)
    List_images.append(img_bw)

    # Origin -> equalization image
    img_equal = Equal_img(_img)
    List_images.append(img_equal)

    # Origin -> equalized grayscale image
    img_equal_gray = Gray_equal_img(_img)
    List_images.append(img_equal_gray)

    # Origin -> contrast stretching image
    img_rescale = Contrast_stretching_img(_img)
    List_images.append(img_rescale)

    # Sharp -> contrast stretching image
    img_rescale = Contrast_stretching_img(Sharp_img(_img))
    List_images.append(img_rescale)

    # Origin -> contrast stretching grayscale image
    img_rescale = Contrast_stretching_img(Gray_img(_img))
    List_images.append(img_rescale)

    # Sharp -> contrast stretching grayscale image
    img_rescale = Gray_Contrast_stretching_img(_img)
    List_images.append(img_rescale)

    plt.figure(num="Image Fig", figsize=(20, 10))
    title = ["Origin", "Sharp Image", "Grayscale", "B&W", "Equalizing", "Grayscale Equalizing",
             "Contrast", "Sharp Contrast", "Grayscale Contrast", "Grayscale Sharp Contrast"]
    idx = 1
    for i, t in zip(List_images, title):
        # Convert BGR to RGB for matplotlib; grayscale images need no conversion
        img = cv2.cvtColor(i, cv2.COLOR_BGR2RGB) if i.ndim == 3 else i
        plt.subplot(2, 5, idx)
        plt.title(t)
        plt.axis('off')
        plt.imshow(img, cmap='gray')
        idx += 1
    plt.show()
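Equal_img above runs CLAHE on each BGR channel independently, which can shift colors. A common variation, sketched here as an assumption rather than part of the original module, equalizes only the lightness channel in LAB space:

def Equal_img_lab(img, grid=8):
    # Convert to LAB so CLAHE touches only the lightness channel
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(grid, grid))
    lab = cv2.merge((clahe.apply(l), a, b))
    return cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)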


testing_code.py

Source: testing_code.py (GitHub)



import cv2
import matplotlib.pyplot as plt
import numpy as np
from skimage import exposure

List_images = []

# Original image
_img = cv2.imread("test.jpg")
List_images.append(_img)

# Origin -> sharpened image
kernel3 = np.array([[0, -1, 0],
                    [-1, 5, -1],
                    [0, -1, 0]])
img_sharp = cv2.filter2D(src=_img, ddepth=-1, kernel=kernel3)
List_images.append(img_sharp)

# Origin -> grayscale image
img_gray = cv2.cvtColor(_img, cv2.COLOR_BGR2GRAY)
List_images.append(img_gray)

# Sharp -> B&W image
img_gray_sharp = cv2.cvtColor(img_sharp, cv2.COLOR_BGR2GRAY)
t = np.average(img_gray_sharp) - 25
thresh, img_bw = cv2.threshold(img_gray_sharp, t, 255, cv2.THRESH_BINARY)
List_images.append(img_bw)

# Origin -> equalization image (CLAHE on each BGR channel)
def Adapt_equal(img, grid=None):
    b, g, r = cv2.split(img)
    if grid is not None:
        clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(grid, grid))
    else:
        clahe = cv2.createCLAHE(clipLimit=3.0)
    clahe_b = clahe.apply(b)
    clahe_g = clahe.apply(g)
    clahe_r = clahe.apply(r)
    equa_img = cv2.merge((clahe_b, clahe_g, clahe_r))
    return equa_img

img_equal = Adapt_equal(_img, 8)
List_images.append(img_equal)

# Origin -> equalized grayscale image
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
img_equal_gray = clahe.apply(img_gray)
List_images.append(img_equal_gray)
# img_equal_gray_s = clahe.apply(img_gray_sharp)

# Origin -> contrast stretching image
p2, p98 = np.percentile(_img, (2, 98))
img_rescale = exposure.rescale_intensity(_img, in_range=(p2, p98))
List_images.append(img_rescale)

# Sharp -> contrast stretching image
# (percentiles are taken from the sharpened image, not the original)
p2, p98 = np.percentile(img_sharp, (2, 98))
img_rescale_sharp = exposure.rescale_intensity(img_sharp, in_range=(p2, p98))
List_images.append(img_rescale_sharp)

# Origin -> contrast stretching grayscale image
p2, p98 = np.percentile(img_gray, (2, 98))
img_rescale_gray = exposure.rescale_intensity(img_gray, in_range=(p2, p98))
List_images.append(img_rescale_gray)

# Sharp -> contrast stretching grayscale image
p2, p98 = np.percentile(img_gray_sharp, (2, 98))
img_rescale_graysharp = exposure.rescale_intensity(img_gray_sharp, in_range=(p2, p98))
List_images.append(img_rescale_graysharp)

# for i in List_images:
#     cv2.imshow("output", i)
#     cv2.waitKey(0)

plt.figure(num="Image Fig", figsize=(20, 10))
title = ["Origin", "Sharp Image", "Grayscale", "B&W", "Equalizing", "Grayscale Equalizing",
         "Contrast", "Sharp Contrast", "Grayscale Contrast", "Grayscale Sharp Contrast"]
idx = 1
for i, t in zip(List_images, title):
    # Convert BGR to RGB for matplotlib; grayscale images need no conversion
    img = cv2.cvtColor(i, cv2.COLOR_BGR2RGB) if i.ndim == 3 else i
    plt.subplot(2, 5, idx)
    plt.title(t)
    plt.axis('off')
    plt.imshow(img, cmap='gray')
    idx += 1
plt.show()
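The script thresholds the sharpened grayscale image at its mean minus 25, which can misfire on very dark or very bright photos. As a hedged alternative (not part of the original script), OpenCV's Otsu mode derives the threshold from the histogram automatically:

# Reuses img_gray_sharp from the script above; the threshold argument (0)
# is ignored when THRESH_OTSU is set
otsu_t, img_bw_otsu = cv2.threshold(img_gray_sharp, 0, 255,
                                    cv2.THRESH_BINARY + cv2.THRESH_OTSU)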


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites for your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.


