How to use the test_volume_list method in tempest

Best Python code snippets using tempest_python
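
All three snippets below follow the same pattern: test_volume_list is a sorted list of '*_volume.mat' file names gathered with os.listdir, and each entry is paired with the matching '*_label.mat' file by index. Here is a minimal sketch of that pattern on its own; the directory path is a placeholder, not taken from the snippets:

import os

# Hypothetical test directory; the snippets below use VISCERAL/SLIVER07 paths.
data_test_path = '/path/to/test_1/'

# Collect volumes and labels separately; sorting aligns the two lists because
# each pair shares the same file-name prefix.
test_volume_list = sorted(f for f in os.listdir(data_test_path) if f.endswith('_volume.mat'))
test_label_list = sorted(f for f in os.listdir(data_test_path) if f.endswith('_label.mat'))

for vol_name, label_name in zip(test_volume_list, test_label_list):
    # Same sanity check the scripts perform with an assert on the shared prefix.
    assert vol_name[:-len('_volume.mat')] == label_name[:-len('_label.mat')]
    print(vol_name, '<->', label_name)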

unet3d_train.py

Source: unet3d_train.py (GitHub)

import numpy as np
import scipy.io as sio
import os, re, random, time
import tensorflow as tf

from unet3d import unet3d


def tversky_loss(labels, logits, output_channels, alpha=0.5, beta=0.5):
    prob = tf.nn.softmax(logits=logits)
    p0 = prob
    p1 = 1 - prob
    g0 = labels
    g1 = 1 - labels
    num = tf.reduce_sum(p0 * g0, axis=(0, 1, 2, 3))
    den = num + alpha * tf.reduce_sum(p0 * g1, axis=(0, 1, 2, 3)) + beta * tf.reduce_sum(p1 * g0, axis=(0, 1, 2, 3))
    t = tf.reduce_sum(num / den)
    return output_channels - t


if __name__ == '__main__':
    dataset = 'VISCERAL_multiple'  # 'VISCERAL_multiple', 'SLIVER07_liver'
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    cross_validation = 1
    restore = False
    vol_size = [8, 512, 512]
    if dataset == 'VISCERAL_multiple':
        data_train_path = '/data2/PEICHAO_LI/data/VISCERAL/multiple_2fold_%d_%d_%d/train_%d/' % (
            vol_size[2], vol_size[1], vol_size[0], cross_validation)
        data_test_path = '/data2/PEICHAO_LI/data/VISCERAL/multiple_2fold_%d_%d_%d/test_%d/' % (
            vol_size[2], vol_size[1], vol_size[0], cross_validation)
    elif dataset == 'SLIVER07_liver':  # TODO
        data_train_path = '/data2/XIAOYUN_ZHOU/SLiver07/liver_2fold/liver_train_%d/' % (cross_validation)
        data_test_path = '/data2/XIAOYUN_ZHOU/SLiver07/liver_2fold/liver_test_%d/' % (cross_validation)

    train_volume_list = sorted(
        [vol_name for vol_name in os.listdir(data_train_path) if vol_name.endswith('_volume.mat')])
    train_label_list = sorted(
        [label_name for label_name in os.listdir(data_train_path) if label_name.endswith('_label.mat')])
    test_volume_list = sorted(
        [vol_name for vol_name in os.listdir(data_test_path) if vol_name.endswith('_volume.mat')])
    test_label_list = sorted(
        [label_name for label_name in os.listdir(data_test_path) if label_name.endswith('_label.mat')])
    train_num = len(train_volume_list)
    test_num = len(test_volume_list)

    batch_size = 1
    epoch = 6
    feature_channels = 16
    output_channels = 8
    downsampling = 3
    loss_type = 'cross_entropy'  # 'cross_entropy' or 'dice'
    step_show = 100

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    vol_in = tf.placeholder(dtype='float32', shape=[None, vol_size[0], vol_size[1], vol_size[2], 1])
    labels = tf.placeholder(dtype='float32', shape=[None, vol_size[0], vol_size[1], vol_size[2], output_channels])
    prediction, logits = unet3d(vol_in, labels, feature_channels=feature_channels, output_channels=output_channels,
                                downsampling=downsampling)
    if loss_type.upper() in ['DICE']:
        loss = tversky_loss(labels=tf.stop_gradient(labels), logits=logits, output_channels=output_channels)
    else:
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.stop_gradient(labels), logits=logits))
    tf.summary.scalar('Loss', loss)
    # tf.summary.scalar('IoU', prediction['IoU'])

    for lr_train in [0.1, 0.05, 0.01, 0.005]:
        boundary = [train_num // batch_size, train_num * 2 // batch_size]
        lr_values = [lr_train, lr_train / 2, lr_train / 10]
        save_path = '/media/xz6214/4276F10376F0F90D/trained_model/%s/%d_%d_%d/unet3d/model_lr_%f_crossval_%s/' % (
            dataset, vol_size[2], vol_size[1], vol_size[0], lr_train, cross_validation)
        if not restore:
            os.system('rm -rf %s' % save_path)
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        logfile = open(save_path + 'training_log.txt', 'w+')
        logfile.write('********************* LR = %f *********************\n' % lr_train)
        global_step = tf.Variable(0, trainable=False)
        lr = tf.train.piecewise_constant(x=global_step, boundaries=boundary, values=lr_values)
        optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9)
        train = optimizer.minimize(loss, global_step=global_step)
        tf.summary.scalar('Learning Rate', lr)
        merged_summary_op = tf.summary.merge_all()
        init = tf.global_variables_initializer()
        saver = tf.train.Saver(max_to_keep=epoch)
        with tf.Session(config=config) as sess:
            tf.train.write_graph(graph_or_graph_def=sess.graph_def, logdir=save_path, name='Model')
            writer = tf.summary.FileWriter(save_path, sess.graph)
            sess.run(init)
            total_loss = 0
            total_parameters = 0
            for variable in tf.trainable_variables():
                shape = variable.get_shape()
                variable_parameters = 1
                for dim in shape:
                    variable_parameters *= dim.value
                total_parameters += variable_parameters
            print(total_parameters)
            logfile.write('total parameters: %d\n' % total_parameters)
            if restore:
                ckpt = tf.train.get_checkpoint_state(save_path)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
            start_time = time.time()
            for step in range((train_num // batch_size) * epoch):
                image_id_list = random.sample(range(train_num), batch_size)
                image_batch = []
                label_batch = []
                for image_id in image_id_list:
                    image_id = np.random.choice(train_num) + 1
                    image_np = sio.loadmat(data_train_path + "%s_volume.mat" % (image_id))['volume']
                    image_np = np.reshape(image_np, (1, image_np.shape[0], image_np.shape[1], image_np.shape[2], 1))
                    image_batch.append(image_np)
                    label_np = sio.loadmat(data_train_path + "%s_label.mat" % (image_id))['label']
                    label_np = np.reshape(label_np,
                                          (1, label_np.shape[0], label_np.shape[1], label_np.shape[2],
                                           label_np.shape[3]))
                    label_batch.append(label_np)
                image_batch = np.concatenate(image_batch, axis=0)
                label_batch = np.concatenate(label_batch, axis=0)
                # _, loss_value, summary, global_step_show, pred, lr_show = sess.run(
                #     [train, loss, merged_summary_op, global_step, prediction, lr],
                #     feed_dict={vol_in: image_batch, labels: label_batch})
                _, loss_value, summary, global_step_show, lr_show = sess.run(
                    [train, loss, merged_summary_op, global_step, lr],
                    feed_dict={vol_in: image_batch, labels: label_batch})
                if (step + 1) % step_show == 0:
                    total_loss += loss_value
                    total_loss = total_loss / step_show
                    # iou = pred['IoU']
                    print('Step: %d, Learning rate: %f, Loss: %f, Running time: %f' %
                          (global_step_show, lr_show, total_loss, time.time() - start_time))
                    total_loss = 0
                    writer.add_summary(summary, global_step=global_step_show)
                    writer.flush()
                    start_time = time.time()
                else:
                    total_loss += loss_value
                # Testing after each epoch
                if (step + 1) % (train_num // batch_size) == 0:
                    saved_path = saver.save(sess, save_path + 'Model', global_step=global_step)
                    print('-------------------------------------------------')
                    logfile.write('-------------------------------------------------\n')
                    i_patient = np.zeros([10, output_channels - 1])
                    u_patient = np.zeros([10, output_channels - 1])
                    for i in range(test_num):
                        volume_path = data_test_path + test_volume_list[i]
                        label_path = data_test_path + test_label_list[i]
                        assert volume_path[:volume_path.find('_volume.mat')] == label_path[:label_path.find('_label.mat')]
                        volume_name = volume_path[:volume_path.find('_volume.mat')]
                        patient_id = volume_name[volume_name.find('test_patient') + 12:]
                        patient_id = int(patient_id[:patient_id.find('_')])
                        volume_np = sio.loadmat(volume_path)['volume']
                        volume_np = np.reshape(volume_np,
                                               (1, volume_np.shape[0], volume_np.shape[1], volume_np.shape[2], 1))
                        label_np = sio.loadmat(label_path)['label']
                        label_np = np.reshape(label_np,
                                              (1, label_np.shape[0], label_np.shape[1], label_np.shape[2],
                                               label_np.shape[3]))
                        [pred, ] = sess.run([prediction], feed_dict={vol_in: volume_np, labels: label_np})
                        i_value = pred['And']
                        u_value = pred['Or']
                        # Calculate IoU for each patient
                        i_patient[patient_id - 1, :] += i_value
                        u_patient[patient_id - 1, :] += u_value
                    iou_all_patients = np.divide(i_patient, u_patient)
                    for patient_id in range(10):
                        msg = 'epoch %d, Testing IoU of each organ for patient %d: %s\n' % (
                            (step + 1) // (train_num // batch_size), patient_id,
                            ','.join(['%.3f' % n for n in iou_all_patients[patient_id, :]]))
                        print(msg)
                        logfile.write(msg)
                    msg = 'epoch %d, Current loss: %f, Average testing IoU of each organ for all %d patients: %s\n' % (
                        (step + 1) // (train_num // batch_size), loss_value, len(iou_all_patients),
                        ','.join(['%.3f' % n for n in np.mean(iou_all_patients, axis=0)]))
                    print(msg)
                    logfile.write(msg)
                    print('-------------------------------------------------')
                    logfile.write('-------------------------------------------------\n\n')
                    logfile.flush()
                    start_time = time.time()
                # Save prediction after each epoch
                if (step + 1) % (train_num // batch_size) == 0:
                    for i in range(len(test_volume_list)):
                        volume_path = data_test_path + test_volume_list[i]
                        label_path = data_test_path + test_label_list[i]
                        assert volume_path[:volume_path.find('_volume.mat')] == label_path[:label_path.find('_label.mat')]
                        volume_name = volume_path[:volume_path.find('_volume.mat')]
                        volume_name = volume_name[volume_name.rfind('/') + 1:]
                        indices = re.search('test_patient[0-9]+', volume_name).span()
                        patient_name = volume_name[indices[0]:indices[1]]
                        volume_np = sio.loadmat(volume_path)['volume']
                        volume_np = np.reshape(volume_np,
                                               (1, volume_np.shape[0], volume_np.shape[1], volume_np.shape[2], 1))
                        label_np = sio.loadmat(label_path)['label']
                        label_np = np.reshape(label_np,
                                              (1, label_np.shape[0], label_np.shape[1], label_np.shape[2],
                                               label_np.shape[3]))
                        [pred_dict, ] = sess.run([prediction], feed_dict={vol_in: volume_np, labels: label_np})
                        prediction_path = save_path + 'prediction_' + str(
                            (step + 1) // (train_num // batch_size)) + '/' + patient_name + '/'
                        if not os.path.exists(prediction_path):
                            os.makedirs(prediction_path)
                        # sio.savemat(prediction_path + volume_name + '_prediction.mat',
                        #             {'probabilities': pred_dict['probabilities']})
                        # sio.savemat(prediction_path + volume_name + '_label.mat', {'label': label_np})
                    print('prediction complete.')
...
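
In unet3d_train.py the optional 'dice' loss is a Tversky loss: per channel, num/den is the Tversky index, and the returned loss is output_channels minus the sum of the indices over channels. With alpha = beta = 0.5 the index reduces to the soft Dice coefficient. A toy NumPy check of that arithmetic (the data here is made up, not from the script):

import numpy as np

# One channel of a tiny 2x2 'volume': one-hot ground truth and softmax output.
g0 = np.array([[1., 0.], [0., 1.]])      # ground truth
p0 = np.array([[0.9, 0.2], [0.1, 0.8]])  # predicted probability

alpha = beta = 0.5
num = np.sum(p0 * g0)
den = num + alpha * np.sum(p0 * (1 - g0)) + beta * np.sum((1 - p0) * g0)
# With alpha = beta = 0.5 this equals the soft Dice coefficient:
# 2*num / (sum(p0) + sum(g0)) = 3.4 / 4.0 = 0.85.
print(num / den)  # 0.85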

vnet_with_2dunet_train.py

Source: vnet_with_2dunet_train.py (GitHub)

import numpy as np
import scipy.io as sio
import os, re, random, time
import tensorflow as tf

from vnet_with_2dunet import vnet

dataset = 'VISCERAL_multiple'  # 'VISCERAL_multiple', 'SLIVER07_liver'
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
cross_validation = 1
restore = False
vol_size = [8, 512, 512]
if dataset == 'VISCERAL_multiple':
    data_train_path = '/media/xz6214/4276F10376F0F90D/datasets/VISCERAL/multiple_2fold_%d_%d_%d/train_%d/' % (
        vol_size[2], vol_size[1], vol_size[0], cross_validation)
    data_test_path = '/media/xz6214/4276F10376F0F90D/datasets/VISCERAL/multiple_2fold_%d_%d_%d/test_%d/' % (
        vol_size[2], vol_size[1], vol_size[0], cross_validation)
elif dataset == 'SLIVER07_liver':  # TODO
    data_train_path = '/data2/XIAOYUN_ZHOU/SLiver07/liver_2fold/liver_train_%d/' % (cross_validation)
    data_test_path = '/data2/XIAOYUN_ZHOU/SLiver07/liver_2fold/liver_test_%d/' % (cross_validation)

train_volume_list = sorted([vol_name for vol_name in os.listdir(data_train_path) if vol_name.endswith('_volume.mat')])
train_label_list = sorted(
    [label_name for label_name in os.listdir(data_train_path) if label_name.endswith('_label.mat')])
test_volume_list = sorted([vol_name for vol_name in os.listdir(data_test_path) if vol_name.endswith('_volume.mat')])
test_label_list = sorted([label_name for label_name in os.listdir(data_test_path) if label_name.endswith('_label.mat')])
train_num = len(train_volume_list)
test_num = len(test_volume_list)

batch_size = 1
epoch = 4
feature_channels = 16
output_channels = 8
step_show = 100

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
vol_in = tf.placeholder(dtype='float32', shape=[None, vol_size[0], vol_size[1], vol_size[2], 1])
labels = tf.placeholder(dtype='float32', shape=[None, vol_size[0], vol_size[1], vol_size[2], output_channels])
prediction, logits = vnet(vol_in, labels, feature_channels=feature_channels, output_channels=output_channels)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.stop_gradient(labels), logits=logits))
tf.summary.scalar('Loss', loss)
# tf.summary.scalar('IoU', prediction['IoU'])

for lr_train in [0.1, 0.05, 0.01, 0.005]:
    boundary = [train_num // batch_size, train_num * 2 // batch_size]
    lr_values = [lr_train, lr_train / 2, lr_train / 10]
    save_path = '/media/xz6214/4276F10376F0F90D/trained_model/%s/%d_%d_%d/vnet_with_2dunet/model_lr_%f_crossval_%s/' % (
        dataset, vol_size[2], vol_size[1], vol_size[0], lr_train, cross_validation)
    if not restore:
        os.system('rm -rf %s' % save_path)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    logfile = open(save_path + 'training_log.txt', 'w+')
    logfile.write('********************* LR = %f *********************\n' % lr_train)
    global_step = tf.Variable(0, trainable=False)
    lr = tf.train.piecewise_constant(x=global_step, boundaries=boundary, values=lr_values)
    optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9)
    train = optimizer.minimize(loss, global_step=global_step)
    tf.summary.scalar('Learning Rate', lr)
    merged_summary_op = tf.summary.merge_all()
    init = tf.global_variables_initializer()
    saver = tf.train.Saver(max_to_keep=epoch)
    with tf.Session(config=config) as sess:
        tf.train.write_graph(graph_or_graph_def=sess.graph_def, logdir=save_path, name='Model')
        writer = tf.summary.FileWriter(save_path, sess.graph)
        sess.run(init)
        total_loss = 0
        total_parameters = 0
        for variable in tf.trainable_variables():
            shape = variable.get_shape()
            variable_parameters = 1
            for dim in shape:
                variable_parameters *= dim.value
            total_parameters += variable_parameters
        print(total_parameters)
        logfile.write('total parameters: %d\n' % total_parameters)
        if restore:
            ckpt = tf.train.get_checkpoint_state(save_path)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
        start_time = time.time()
        for step in range((train_num // batch_size) * epoch):
            image_id_list = random.sample(range(train_num), batch_size)
            image_batch = []
            label_batch = []
            for image_id in image_id_list:
                image_id = np.random.choice(train_num) + 1
                image_np = sio.loadmat(data_train_path + "%s_volume.mat" % (image_id))['volume']
                image_np = np.reshape(image_np, (1, image_np.shape[0], image_np.shape[1], image_np.shape[2], 1))
                image_batch.append(image_np)
                label_np = sio.loadmat(data_train_path + "%s_label.mat" % (image_id))['label']
                label_np = np.reshape(label_np,
                                      (1, label_np.shape[0], label_np.shape[1], label_np.shape[2], label_np.shape[3]))
                label_batch.append(label_np)
            image_batch = np.concatenate(image_batch, axis=0)
            label_batch = np.concatenate(label_batch, axis=0)
            # _, loss_value, summary, global_step_show, pred, lr_show = sess.run(
            #     [train, loss, merged_summary_op, global_step, prediction, lr],
            #     feed_dict={vol_in: image_batch, labels: label_batch})
            _, loss_value, summary, global_step_show, lr_show = sess.run(
                [train, loss, merged_summary_op, global_step, lr],
                feed_dict={vol_in: image_batch, labels: label_batch})
            if (step + 1) % step_show == 0:
                total_loss += loss_value
                total_loss = total_loss / step_show
                # iou = pred['IoU']
                print('Step: %d, Learning rate: %f, Loss: %f, Running time: %f' %
                      (global_step_show, lr_show, total_loss, time.time() - start_time))
                total_loss = 0
                writer.add_summary(summary, global_step=global_step_show)
                writer.flush()
                start_time = time.time()
            else:
                total_loss += loss_value
            # Testing after each epoch
            if (step + 1) % (train_num // batch_size) == 0:
                saved_path = saver.save(sess, save_path + 'Model', global_step=global_step)
                print('-------------------------------------------------')
                logfile.write('-------------------------------------------------\n')
                i_patient = np.zeros([10, output_channels - 1])
                u_patient = np.zeros([10, output_channels - 1])
                for i in range(test_num):
                    volume_path = data_test_path + test_volume_list[i]
                    label_path = data_test_path + test_label_list[i]
                    assert volume_path[:volume_path.find('_volume.mat')] == label_path[:label_path.find('_label.mat')]
                    volume_name = volume_path[:volume_path.find('_volume.mat')]
                    patient_id = volume_name[volume_name.find('test_patient') + 12:]
                    patient_id = int(patient_id[:patient_id.find('_')])
                    volume_np = sio.loadmat(volume_path)['volume']
                    volume_np = np.reshape(volume_np,
                                           (1, volume_np.shape[0], volume_np.shape[1], volume_np.shape[2], 1))
                    label_np = sio.loadmat(label_path)['label']
                    label_np = np.reshape(label_np,
                                          (1, label_np.shape[0], label_np.shape[1], label_np.shape[2],
                                           label_np.shape[3]))
                    [pred, ] = sess.run([prediction], feed_dict={vol_in: volume_np, labels: label_np})
                    i_value = pred['And']
                    u_value = pred['Or']
                    # Calculate IoU for each patient
                    i_patient[patient_id - 1, :] += i_value
                    u_patient[patient_id - 1, :] += u_value
                iou_all_patients = np.divide(i_patient, u_patient)
                print(iou_all_patients)
                for patient_id in range(10):
                    msg = 'epoch %d, Testing IoU of each organ for patient %d: %s\n' % (
                        (step + 1) // (train_num // batch_size), patient_id,
                        ','.join(['%.3f' % n for n in iou_all_patients[patient_id, :]]))
                    print(msg)
                    logfile.write(msg)
                msg = 'epoch %d, Current loss: %f, Average testing IoU of each organ for all %d patients: %s\n' % (
                    (step + 1) // (train_num // batch_size), loss_value, len(iou_all_patients),
                    ','.join(['%.3f' % n for n in np.mean(iou_all_patients, axis=0)]))
                print(msg)
                logfile.write(msg)
                print('-------------------------------------------------')
                logfile.write('-------------------------------------------------\n\n')
                logfile.flush()
                start_time = time.time()
            # Save prediction after each epoch
            if (step + 1) % (train_num // batch_size) == 0:
                for i in range(len(test_volume_list)):
                    volume_path = data_test_path + test_volume_list[i]
                    label_path = data_test_path + test_label_list[i]
                    assert volume_path[:volume_path.find('_volume.mat')] == label_path[:label_path.find('_label.mat')]
                    volume_name = volume_path[:volume_path.find('_volume.mat')]
                    volume_name = volume_name[volume_name.rfind('/') + 1:]
                    indices = re.search('test_patient[0-9]+', volume_name).span()
                    patient_name = volume_name[indices[0]:indices[1]]
                    volume_np = sio.loadmat(volume_path)['volume']
                    volume_np = np.reshape(volume_np,
                                           (1, volume_np.shape[0], volume_np.shape[1], volume_np.shape[2], 1))
                    label_np = sio.loadmat(label_path)['label']
                    label_np = np.reshape(label_np,
                                          (1, label_np.shape[0], label_np.shape[1], label_np.shape[2],
                                           label_np.shape[3]))
                    [pred_dict, ] = sess.run([prediction], feed_dict={vol_in: volume_np, labels: label_np})
                    prediction_path = save_path + 'prediction_' + str(
                        (step + 1) // (train_num // batch_size)) + '/' + patient_name + '/'
                    if not os.path.exists(prediction_path):
                        os.makedirs(prediction_path)
                    sio.savemat(prediction_path + volume_name + '_prediction.mat',
                                {'probabilities': pred_dict['probabilities']})
                    sio.savemat(prediction_path + volume_name + '_label.mat', {'label': label_np})
                print('prediction complete.')
    logfile.close()


def tversky_loss(gt, prob, alpha=0.5, beta=0.5):
    p0 = prob
    p1 = tf.ones(gt.shape) - prob
    g0 = gt
    g1 = tf.ones(gt.shape) - gt
    num = tf.reduce_sum(p0 * g0, axis=(0, 1, 2, 3))
    den = num + alpha * tf.reduce_sum(p0 * g1, axis=(0, 1, 2, 3)) + beta * tf.reduce_sum(p1 * g0, axis=(0, 1, 2, 3))
    t = tf.reduce_sum(num / den)
...
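
The evaluation loops above compute IoU per patient rather than per slab: each test file name encodes a patient id ('test_patient<N>_...'), and the network's 'And' (intersection) and 'Or' (union) voxel counts are accumulated into per-patient totals before a single division. A toy illustration of why accumulating first matters (all numbers are made up):

import numpy as np

# Hypothetical (patient_id, intersection, union) counts for three slabs
# belonging to two patients and a single organ.
slabs = [(1, 40., 50.), (1, 10., 30.), (2, 60., 100.)]

i_patient = np.zeros(2)
u_patient = np.zeros(2)
for patient_id, i_value, u_value in slabs:
    # Sum intersections and unions over all of a patient's slabs first ...
    i_patient[patient_id - 1] += i_value
    u_patient[patient_id - 1] += u_value
# ... then divide once, giving a per-patient IoU instead of a mean of slab IoUs.
print(np.divide(i_patient, u_patient))  # [0.625, 0.6]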

depthwise_dilated_unet_train.py

Source: depthwise_dilated_unet_train.py (GitHub)

import numpy as np
import scipy.io as sio
import os, re, random, time
import tensorflow as tf

from depthwise_dilated_unet import DDUNet

dataset = 'VISCERAL_aorta'  # 'VISCERAL_aorta', 'SLIVER07_liver'
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
cross_validation = 1
restore = False
vol_size = [8, 512, 512]
if dataset == 'VISCERAL_aorta':
    data_train_path = '/data2/PEICHAO_LI/data/VISCERAL/aorta_2fold_augmented_%d_%d_%d/aorta_train_%d/' % (
        vol_size[2], vol_size[1], vol_size[0], cross_validation)
    data_test_path = '/data2/PEICHAO_LI/data/VISCERAL/aorta_2fold_augmented_%d_%d_%d/aorta_test_%d/' % (
        vol_size[2], vol_size[1], vol_size[0], cross_validation)
elif dataset == 'SLIVER07_liver':  # TODO
    data_train_path = '/data2/XIAOYUN_ZHOU/SLiver07/liver_2fold/liver_train_%d/' % (cross_validation)
    data_test_path = '/data2/XIAOYUN_ZHOU/SLiver07/liver_2fold/liver_test_%d/' % (cross_validation)

train_volume_list = sorted([vol_name for vol_name in os.listdir(data_train_path) if vol_name.endswith('_volume.mat')])
train_label_list = sorted([label_name for label_name in os.listdir(data_train_path) if label_name.endswith('_label.mat')])
test_volume_list = sorted([vol_name for vol_name in os.listdir(data_test_path) if vol_name.endswith('_volume.mat')])
test_label_list = sorted([label_name for label_name in os.listdir(data_test_path) if label_name.endswith('_label.mat')])
train_num = len(train_volume_list)
test_num = len(test_volume_list)

batch_size = 1
epoch = 12
feature_channels = 16
output_channels = 2
downsampling = 6
downsampling_type = 'conv'  # 'conv', 'max_pooling'
upsampling_type = 'bilinear'  # 'deconv', 'nearest_neighbour', 'bilinear'
norm_type = 'IN'  # 'IN', 'LN', 'BN'
step_show = 100

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
vol_in = tf.placeholder(dtype='float32', shape=[batch_size, vol_size[0], vol_size[1], vol_size[2], 1])
labels = tf.placeholder(dtype='float32', shape=[batch_size, vol_size[0], vol_size[1], vol_size[2], 2])
model = DDUNet(vol_in,
               labels,
               feature_channels,
               output_channels,
               downsampling,
               downsampling_type,
               upsampling_type,
               norm_type)
prediction = model.prediction
logits = model.logits
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.stop_gradient(labels), logits=logits))
tf.summary.scalar('Loss', loss)
tf.summary.scalar('IoU', prediction['IoU'])

for lr_train in [0.1, 0.05, 0.01, 0.005]:
    boundary = [train_num // batch_size, train_num * 4 // batch_size, train_num * 7 // batch_size]
    lr_values = [lr_train, lr_train / 2, lr_train / 10, lr_train / 50]
    save_path = '/media/xz6214/4276F10376F0F90D/trained_model/%s/%d_%d_%d/ddunet/model_lr_%f_crossval_%s/' % (
        dataset, vol_size[2], vol_size[1], vol_size[0], lr_train, cross_validation)
    if not restore:
        os.system('rm -rf %s' % save_path)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    logfile = open(save_path + 'training_log.txt', 'w+')
    logfile.write('********************* LR = %f *********************\n' % lr_train)
    global_step = tf.Variable(0, trainable=False)
    lr = tf.train.piecewise_constant(x=global_step, boundaries=boundary, values=lr_values)
    optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9)
    train = optimizer.minimize(loss, global_step=global_step)
    tf.summary.scalar('Learning Rate', lr)
    merged_summary_op = tf.summary.merge_all()
    init = tf.global_variables_initializer()
    saver = tf.train.Saver(max_to_keep=3)
    with tf.Session(config=config) as sess:
        tf.train.write_graph(graph_or_graph_def=sess.graph_def, logdir=save_path, name='Model')
        writer = tf.summary.FileWriter(save_path, sess.graph)
        sess.run(init)
        total_loss = 0
        total_parameters = 0
        for variable in tf.trainable_variables():
            shape = variable.get_shape()
            variable_parameters = 1
            for dim in shape:
                variable_parameters *= dim.value
            total_parameters += variable_parameters
        print(total_parameters)
        logfile.write('total parameters: %d\n' % total_parameters)
        if restore:
            ckpt = tf.train.get_checkpoint_state(save_path)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
        start_time = time.time()
        for step in range((train_num // batch_size) * epoch):
            image_id_list = random.sample(range(train_num), batch_size)
            image_batch = []
            label_batch = []
            for image_id in image_id_list:
                image_id = np.random.choice(train_num) + 1
                image_np = sio.loadmat(data_train_path + "%s_volume.mat" % (image_id))['volume']
                image_np = np.reshape(image_np, (1, image_np.shape[0], image_np.shape[1], image_np.shape[2], 1))
                image_batch.append(image_np)
                label_np = sio.loadmat(data_train_path + "%s_label.mat" % (image_id))['label']
                label_np = np.reshape(label_np,
                                      (1, label_np.shape[0], label_np.shape[1], label_np.shape[2], label_np.shape[3]))
                label_batch.append(label_np)
            image_batch = np.concatenate(image_batch, axis=0)
            label_batch = np.concatenate(label_batch, axis=0)
            _, loss_value, summary, global_step_show, pred, lr_show = sess.run(
                [train, loss, merged_summary_op, global_step, prediction, lr],
                feed_dict={vol_in: image_batch, labels: label_batch})
            if (step + 1) % step_show == 0:
                total_loss += loss_value
                total_loss = total_loss / step_show
                iou = pred['IoU']
                print('Step: %d, Learning rate: %f, Loss: %f, Running time: %f' %
                      (global_step_show, lr_show, total_loss, time.time() - start_time))
                total_loss = 0
                writer.add_summary(summary, global_step=global_step_show)
                writer.flush()
                start_time = time.time()
            else:
                total_loss += loss_value
            # Testing after each epoch
            if (step + 1) % (train_num // batch_size) == 0:
                saved_path = saver.save(sess, save_path + 'Model', global_step=global_step)
                print('-------------------------------------------------')
                logfile.write('-------------------------------------------------\n')
                i_patient = np.zeros(10)
                u_patient = np.zeros(10)
                for i in range(len(test_volume_list)):
                    volume_path = data_test_path + test_volume_list[i]
                    label_path = data_test_path + test_label_list[i]
                    assert volume_path[:volume_path.find('_volume.mat')] == label_path[:label_path.find('_label.mat')]
                    volume_name = volume_path[:volume_path.find('_volume.mat')]
                    patient_id = volume_name[volume_name.find('test_patient') + 12:]
                    patient_id = int(patient_id[:patient_id.find('_')])
                    volume_np = sio.loadmat(volume_path)['volume']
                    volume_np = np.reshape(volume_np,
                                           (1, volume_np.shape[0], volume_np.shape[1], volume_np.shape[2], 1))
                    label_np = sio.loadmat(label_path)['label']
                    label_np = np.reshape(label_np,
                                          (1, label_np.shape[0], label_np.shape[1], label_np.shape[2],
                                           label_np.shape[3]))
                    [pred, ] = sess.run([prediction], feed_dict={vol_in: volume_np, labels: label_np})
                    i_value = pred['And']
                    u_value = pred['Or']
                    # Calculate IoU for each patient
                    i_patient[patient_id - 1] += np.sum(i_value)
                    u_patient[patient_id - 1] += np.sum(u_value)
                iou_list = np.divide(i_patient, u_patient)
                assert len(iou_list) == 10
                for patient_id in range(10):
                    msg = 'epoch %d, Testing IoU for patient %d: %f\n' % (
                        (step + 1) // (train_num // batch_size), patient_id, iou_list[patient_id])
                    print(msg)
                    logfile.write(msg)
                msg = 'epoch %d, Current loss: %f, Average testing IoU for all %d patients: %f\n' % (
                    (step + 1) // (train_num // batch_size), loss_value, len(iou_list), np.mean(iou_list))
                print(msg)
                logfile.write(msg)
                print('-------------------------------------------------')
                logfile.write('-------------------------------------------------\n\n')
                logfile.flush()
                start_time = time.time()
            # Save prediction after each epoch
            if (step + 1) % (train_num // batch_size) == 0:
                for i in range(len(test_volume_list)):
                    volume_path = data_test_path + test_volume_list[i]
                    label_path = data_test_path + test_label_list[i]
                    assert volume_path[:volume_path.find('_volume.mat')] == label_path[:label_path.find('_label.mat')]
                    volume_name = volume_path[:volume_path.find('_volume.mat')]
                    volume_name = volume_name[volume_name.rfind('/') + 1:]
                    indices = re.search('test_patient[0-9]+', volume_name).span()
                    patient_name = volume_name[indices[0]:indices[1]]
                    volume_np = sio.loadmat(volume_path)['volume']
                    volume_np = np.reshape(volume_np,
                                           (1, volume_np.shape[0], volume_np.shape[1], volume_np.shape[2], 1))
                    label_np = sio.loadmat(label_path)['label']
                    label_np = np.reshape(label_np,
                                          (1, label_np.shape[0], label_np.shape[1], label_np.shape[2],
                                           label_np.shape[3]))
                    [pred_dict, ] = sess.run([prediction], feed_dict={vol_in: volume_np, labels: label_np})
                    prediction_path = save_path + 'prediction_' + str(
                        (step + 1) // (train_num // batch_size)) + '/' + patient_name + '/'
                    if not os.path.exists(prediction_path):
                        os.makedirs(prediction_path)
                    sio.savemat(prediction_path + volume_name + '_prediction.mat',
                                {'probabilities': pred_dict['probabilities']})
                    sio.savemat(prediction_path + volume_name + '_label.mat', {'label': label_np})
                print('prediction complete.')
...
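
depthwise_dilated_unet_train.py uses a four-stage learning-rate schedule: tf.train.piecewise_constant holds the rate at lr_train for the first epoch, then steps down to lr_train/2, lr_train/10, and lr_train/50 after epochs 1, 4, and 7 (the boundaries are expressed in optimizer steps). A plain-Python sketch of that schedule's semantics, with illustrative numbers rather than values from the script:

# Plain-Python equivalent of the tf.train.piecewise_constant lookup:
# values[0] while global_step <= boundaries[0], values[i] between
# boundaries[i-1] and boundaries[i], values[-1] afterwards.
def piecewise_lr(global_step, boundaries, values):
    for boundary, value in zip(boundaries, values):
        if global_step <= boundary:
            return value
    return values[-1]

steps_per_epoch = 100  # hypothetical stand-in for train_num // batch_size
boundaries = [steps_per_epoch, steps_per_epoch * 4, steps_per_epoch * 7]
values = [0.1, 0.05, 0.01, 0.002]  # lr_train, /2, /10, /50 for lr_train = 0.1

for step in [0, 100, 101, 400, 401, 700, 701]:
    print(step, piecewise_lr(step, boundaries, values))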

