How to use the get_image_parameters method in Avocado

Best Python code snippet using avocado_python

dicom.py

Source:dicom.py Github

copy

Full Screen

# NOTE(review): this block was a single mangled line with the original file's
# line numbers embedded in the text; it has been reconstructed into valid,
# conventionally formatted Python. The paste begins mid-class (the visible
# fragment "self.target_rois = []" is the tail of an unseen __init__) and is
# truncated inside extract_cases() at the end.


class DicomInterface:
    # NOTE(review): class header reconstructed; parse_directory() below shows
    # the constructor is called as DicomInterface(dicom_dir=..., rs_dir=...).
    # __init__ (which sets self.rs_file, self.dicom_dir, self.ct_files and
    # self.target_rois = []) lies outside this view.

    def get_patient_id(self):
        """Return the PatientID tag read from the RT-structure file."""
        return pydicom.read_file(self.rs_file).PatientID

    def get_image_parameters(self, ct_files):
        """Return (slice z-positions, xy center, voxel spacing, volume dims).

        Geometry (center/spacing/dims) is taken from the last scan after
        sorting by InstanceNumber -- all slices are assumed to share it
        (this matches the original's "parameters of the last one" comment).
        """
        scans = [pydicom.read_file(s) for s in ct_files]
        scans.sort(key=lambda x: int(x.InstanceNumber))
        slices = [ct.ImagePositionPatient[2] for ct in scans]

        ct = scans[-1]
        center = np.around(
            (ct.ImagePositionPatient[0], ct.ImagePositionPatient[1]),
            decimals=1)
        spacing = (float(ct.PixelSpacing[0]),
                   float(ct.PixelSpacing[1]),
                   float(ct.SliceThickness))
        dims = (int(ct.Rows), int(ct.Columns), len(ct_files))
        return slices, center, spacing, dims

    def set_target_rois(self, roi_candidates):
        """Append every candidate ROI that has contour data to target_rois."""
        for roi_name in roi_candidates:
            if self.check_contour_data(roi_name):
                self.target_rois.append(roi_name)
        return self.target_rois

    def check_contour_data(self, roi):
        """Return True iff *roi* exists in the structure set and has contours."""
        try:
            roi_index = self.get_structures(self.rs_file)[roi]['index']
            contour_data = self.get_contour_data(self.rs_file, roi_index)
            return len(contour_data) > 0
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # are no longer swallowed. A missing ROI/tag simply means "no data".
            return False

    def get_slice_contours(self, contour_data, slices):
        """Group flat contour point-lists by their z coordinate (element [2])."""
        slice_contours = {z: [] for z in slices}
        for contour in contour_data:
            slice_contours[contour[2]].append(contour)
        return slice_contours

    def get_structures(self, rs_file=None):
        """Map ROI name -> {'index': position in StructureSetROISequence}."""
        if rs_file is None:
            rs_file = self.rs_file
        rs = pydicom.read_file(rs_file)
        return {item.ROIName: {'index': i}
                for i, item in enumerate(rs.StructureSetROISequence)}

    def get_contour_data(self, rs_file, roi_index):
        """Return the list of raw ContourData sequences for one ROI."""
        rs = pydicom.read_file(rs_file)
        contour_seq = rs.ROIContourSequence[roi_index].ContourSequence
        # The original looped `data[i] = data[i]` over each sequence -- a
        # no-op element-wise self-assignment; dropped.
        return [sc.ContourData for sc in contour_seq]

    def get_label_volume(self, roi=None, target_index=0):
        """Rasterize one ROI into a binary uint8 volume on the CT grid.

        If *roi* is None, self.target_rois[target_index] is used.
        """
        if roi is None:  # was `roi == None`
            roi = self.target_rois[target_index]
        slices, center, spacing, dims = self.get_image_parameters(self.ct_files)

        roi_index = self.get_structures(self.rs_file)[roi]['index']
        contour_data = self.get_contour_data(self.rs_file, roi_index)
        slice_contours = self.get_slice_contours(contour_data, slices)

        label_volume = np.zeros(dims, dtype=np.uint8)
        for k, z in enumerate(slice_contours):
            label_volume[:, :, k] = self.draw_slice_label(
                slice_contours[z], center, spacing, dims)

        # Slices sorted by InstanceNumber may run in descending z: flip.
        if slices[0] > slices[1]:
            label_volume = label_volume[:, :, ::-1]
        return label_volume.reshape(dims)

    def draw_slice_label(self, contour, center, spacing, dims):
        """Draw the filled contour polygons of one slice into a 2-D mask."""
        img = Image.new("1", (dims[0], dims[1]))
        draw = ImageDraw.Draw(img)
        slice_label = np.zeros([dims[0], dims[1]])
        for c in contour:
            # ContourData is a flat x,y,z,x,y,z,... list.
            x = list(c[0::3])
            y = list(c[1::3])
            x.append(x[0])  # close the polygon
            y.append(y[0])
            # NOTE(review): the y coordinate divides by spacing[0] (row
            # spacing) rather than spacing[1]; kept as in the original since
            # in-plane spacing is usually isotropic -- verify intent.
            poly = [(int((px - center[0]) / spacing[0]),
                     int((py - center[1]) / spacing[0]))
                    for px, py in zip(x, y)]
            draw.polygon(poly, fill=1, outline=1)
        for i in range(dims[1]):
            for j in range(dims[0]):
                # getpixel keeps the original's exact pixel values (PIL mode
                # "1" may yield 0/255 rather than 0/1 -- TODO confirm).
                slice_label[i, j] = img.getpixel((j, i))
        return slice_label

    def get_image_volume(self):
        """Load the CT series into a rescaled volume ordered by ascending z."""
        slices, _, _, dims = self.get_image_parameters(self.ct_files)
        image_volume = np.zeros(dims)
        for ct_file in self.ct_files:
            ct = pydicom.read_file(ct_file)
            # InstanceNumber is 1-based; place each slice at its own index.
            # (The original also built an unused `idx` list with an O(n)
            # list.index() call per file; removed.)
            image_volume[:, :, ct.InstanceNumber - 1] = ct.pixel_array
        if slices[0] > slices[1]:
            image_volume = image_volume[:, :, ::-1]
        # Rescale with the last file's slope/intercept -- assumed uniform
        # across the series (TODO confirm).
        image_volume = image_volume * ct.RescaleSlope + ct.RescaleIntercept
        return image_volume.reshape(dims)


def get_patient_dict(rs_file_list):
    """Map int(PatientID) -> RT-structure file path."""
    return {int(pydicom.read_file(f).PatientID): f for f in rs_file_list}


def parse_directory(directory, all_scans=False):
    """Build {patient_id: {phase_index: DicomInterface}} from *directory*."""
    p_dict = {}
    for patient_id, respiratory_phases in directory.items():
        p_dict[patient_id] = {}
        for i, phase in enumerate(respiratory_phases):
            p_dict[patient_id][i] = DicomInterface(dicom_dir=phase['CT'],
                                                   rs_dir=phase['RT'])
    return p_dict


def discover_names(p_dict, roi_tags, not_roi_tags=None):
    """Collect ROI names that contain every tag in *roi_tags*
    (case-insensitive) and none of *not_roi_tags* (case-sensitive, matching
    the original behavior).
    """
    # BUG FIX: the original iterated not_roi_tags unconditionally and raised
    # TypeError whenever the default None was used.
    not_roi_tags = not_roi_tags or []
    roi_tags = [t.lower() for t in roi_tags]
    tags = set()
    for patient in p_dict:
        for phase in p_dict[patient].values():
            for name in phase.get_structures():
                keep = all(tag in name.lower() for tag in roi_tags)
                if any(nt in name for nt in not_roi_tags):
                    keep = False
                if keep:
                    tags.add(name)
    return tags


def assign_roi_label(p_dict, roi_candidates):
    """Assign target ROIs to every phase.

    Returns (#phases with no match, #with multiple, #with exactly one,
    p_dict).
    """
    del_list = []
    m_count = 0
    s_count = 0
    for patient in p_dict:
        for i, phase in p_dict[patient].items():
            phase.set_target_rois(roi_candidates)
            n_rois = len(phase.target_rois)
            if n_rois == 1:
                s_count += 1
            elif n_rois > 1:
                m_count += 1
            else:
                # BUG FIX: patient ids are ints (see get_patient_dict), so the
                # original `patient + '_' + str(i)` raised TypeError.
                del_list.append(str(patient) + '_' + str(i))
    return len(del_list), m_count, s_count, p_dict


def extract_cases(p_dict, data_dir, base_name):
    """Export image/label volumes per patient and phase.

    NOTE(review): the source is truncated inside this function; the tail is
    reconstructed only up to the truncation point.
    """
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)

    patients = list(p_dict.keys())

    error = {}
    count = 0
    for patient in patients:
        # NOTE(review): assumes str patient keys here -- verify vs the int
        # keys produced by get_patient_dict.
        patient_dir = data_dir + patient + '/'
        if not os.path.exists(patient_dir):
            os.makedirs(patient_dir)
        scan_dir = patient_dir + 'original_data/'
        if not os.path.exists(scan_dir):
            os.makedirs(scan_dir)

        for i, phase in p_dict[patient].items():
            _, _, spacing, _ = phase.get_image_parameters(phase.ct_files)
            count += 1
            # Skip series with degenerate geometry.
            if 0 in spacing:
                continue
            # new_spacing = (1.0, 1.0, 1.0)

            labs = {}
            img = phase.get_image_volume()
            # applying Hounsfield Unit window
            # window_center = -300
            # window_width = 1400
            # img = HU_window(img, window_center, window_width)

            for idx, label in enumerate(phase.target_rois):
                lab = phase.get_label_volume(target_index=idx)
                # ... (source truncated here)

Full Screen

Full Screen

test_vision.py

Source:test_vision.py Github

copy

Full Screen

...53 self.assertTrue(cam_event.wait(10))54 self.assertIsNone(self.vision.get_img_compressed.unsubscribe())55 def test_image_parameters(self):56 self.assertIsInstance(self.vision.get_image_parameters, NiryoTopic)57 self.assertIsInstance(self.vision.get_image_parameters(), ImageParameters)58 img_param_event = Event()59 img_param_event.clear()60 def img_param_callback(img_param):61 self.assertIsInstance(img_param, ImageParameters)62 img_param_event.set()63 self.assertIsNone(self.vision.get_image_parameters.subscribe(img_param_callback))64 self.assertTrue(img_param_event.wait(10))65 self.assertIsNone(self.vision.get_image_parameters.unsubscribe())66 old_img_param = self.vision.get_image_parameters.value67 for i, function in enumerate(68 [self.vision.set_brightness, self.vision.set_contrast, self.vision.set_saturation]):69 new_value = old_img_param[i] + 1.070 self.assertIsNone(function(new_value))71 self.assertEqual(self.vision.get_image_parameters()[i], new_value)72 self.assertIsNone(function(1))73 self.assertEqual(self.vision.get_image_parameters()[i], 1)74 with self.assertRaises(RobotCommandException):75 function("1")76 with self.assertRaises(RobotCommandException):77 function(True)78 def test_workspace(self):79 ws_name = "unit_test_ws"80 self.assertIsNone(self.vision.delete_workspace(ws_name))81 unit_test_ws_poses = [ws_name,82 [0.3, -0.1, 0.0, 0.0, 1.57, 0.0],83 PoseObject(0.3, 0.1, 0.0, 0.0, 1.57, 0.0),84 PoseObject(0.1, 0.1, 0.0, 0.0, 1.57, 0.0),85 [0.1, -0.1, 0.0, 0.0, 1.57, 0.0]]86 unit_test_ws_points = [ws_name,87 [0.3, -0.1, 0.0],...

Full Screen

Full Screen

snrseries.py

Source:snrseries.py Github

copy

Full Screen

# NOTE(review): reconstructed from a mangled single-line paste; it begins
# mid-way through a block of module constants (original lines 9-10) and is
# truncated after `saved_network_file_name`. `deeptracknew`, `uniform`
# (presumably numpy.random.uniform) and `pi` are imported outside this view.

other_particle_range = 25
particle_distance = 10


def get_image_parameters():
    """Build a randomized parameter dict for one synthetic particle image.

    Relies on the module-level globals particle_number, first_particle_range,
    other_particle_range and particle_distance.
    """
    particles_center_x, particles_center_y = deeptracknew.particle_positions(
        particle_number, first_particle_range, other_particle_range,
        particle_distance)

    image_parameters = {}
    image_parameters['Particle Center X List'] = particles_center_x
    image_parameters['Particle Center Y List'] = particles_center_y
    image_parameters['Particle Radius List'] = uniform(1, 3, particle_number)
    # NOTE(review): the two lists below are hard-coded to 4 particles and
    # will disagree with particle_number if it is not 4 -- verify.
    image_parameters['Particle Bessel Orders List'] = [[1], [1], [1], [1]]
    image_parameters['Particle Intensities List'] = [
        [uniform(0.5, 0.7, 1)],
        [uniform(0.5, 0.7, 1)],
        [uniform(0.5, 0.7, 1)],
        [uniform(0.5, 0.7, 1)]]
    image_parameters['Image Half-Size'] = 25
    image_parameters['Image Background Level'] = uniform(0, .2)
    image_parameters['Signal to Noise Ratio'] = uniform(8, 10)
    image_parameters['Gradient Intensity'] = uniform(0, 0.1)
    image_parameters['Gradient Direction'] = uniform(-0.2, 0.2)  # -pi to pi usually
    image_parameters['Ellipsoid Orientation'] = uniform(-pi, pi, particle_number)
    image_parameters['Ellipticity'] = uniform(1, 1.3)

    return image_parameters


# The original wrapped the function in a redundant `lambda:` -- pass it
# directly instead.
image_parameters_function = get_image_parameters

# Define image generator
image_generator = lambda: deeptracknew.get_image_generator(
    image_parameters_function)

# Show some examples of generated images
number_of_images_to_show = 10

for image_number, image, image_parameters in image_generator():
    if image_number >= number_of_images_to_show:
        break

    deeptracknew.plot_sample_image(image, image_parameters)

# Load the pretrained network
saved_network_file_name = 'neuromorph_psf_new Network 2020-07-25-172716 C-16-32-64 D-32-32 training-8x5001-32x4001-128x2501-512x1001-1024x101.h5'
# ... (source truncated here)

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, i.e., Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run avocado automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation test minutes FREE!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful