How to use a parallelized method in Lemoncheesecake

Best Python code snippets using lemoncheesecake
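Note that the snippets below were matched on the identifier "parallelized" in open-source projects; none of them exercises lemoncheesecake itself. On the lemoncheesecake side, parallelism is a feature of the test runner rather than of your test code: you write ordinary suites, and the lcc run command fans the tests out over worker threads. Here is a minimal sketch, assuming a project created with lcc bootstrap and a lemoncheesecake version whose lcc run supports the --threads option (the suite and test names are illustrative):

# suites/parallel_demo.py -- illustrative names, not from the snippets below
import time

import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, equal_to

@lcc.suite("Parallel demo")
class parallel_demo:
    @lcc.test("First slow test")
    def first_slow_test(self):
        time.sleep(1)  # stand-in for real work
        check_that("sum", 1 + 1, equal_to(2))

    @lcc.test("Second slow test")
    def second_slow_test(self):
        time.sleep(1)
        check_that("product", 2 * 2, equal_to(4))

Run with lcc run --threads 2 and the two tests should finish in roughly one second instead of two, since each test is dispatched to its own worker thread.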

test_dygraph_models_class_method_parallel.py

Source: test_dygraph_models_class_method_parallel.py (GitHub)

...
        logging.info("example #{}:".format(index))
        logging.info("label: {}".format(label_name))
        logging.info("text: {}".format(text.encode("utf-8")))
        logging.info("token_ids: {}".format(token_ids))

    def test_textcnn_parallelized(self):
        # When running on multiple GPUs, the embedding layer's is_sparse parameter
        # must be False, i.e. gradient updates must not use sparse updates,
        # because multi-GPU training cannot handle sparse gradients when gathering them.
        textcnn_config = {
            "num_class": TestDygraphModelsParallelized.label_encoder.size(),
            "vocab_size": TestDygraphModelsParallelized.tokenizer.size(),
            "emb_dim": 512,
            "num_filters": 256,
            "fc_hid_dim": 512,
            "num_channels": 1,
            "win_size_list": [3],
            "is_sparse": False,
            "use_cudnn": True,
        }
        run_config = {
            "model_save_path": os.path.join(TestDygraphModelsParallelized.test_output_dir, "textcnn"),
            "best_model_save_path": os.path.join(TestDygraphModelsParallelized.test_output_dir, "textcnn_best"),
            "epochs": 2,
            "batch_size": 32,
            "learning_rate": 5e-4,
            "max_seq_len": 300,
            "print_step": 200,
            "load_best_model": True,
        }
        start_time = time.time()

        class TextCNNModel(ClassificationModel):
            @model_parallelized(TestDygraphModelsParallelized.strategy)
            def build(self, **model_config):
                self.model = TextCNNClassifier(**model_config)
                self.built = True

        place = F.CUDAPlace(D.ParallelEnv().dev_id)
        with D.guard(place):
            textcnn_model = TextCNNModel()
            textcnn_model.build(**textcnn_config)
            best_acc = textcnn_model.train(
                TestDygraphModelsParallelized.train_data, TestDygraphModelsParallelized.eval_data,
                label_encoder=TestDygraphModelsParallelized.label_encoder,
                **run_config)
        logging.warning("textcnn parallelized best train score: {}, cost time: {}s".format(best_acc, time.time() - start_time))

    def test_gru_parallelized(self):
        gru_config = {
            "num_class": TestDygraphModelsParallelized.label_encoder.size(),
            "vocab_size": TestDygraphModelsParallelized.tokenizer.size(),
            "emb_dim": 512,
            "gru_dim": 256,
            "fc_hid_dim": 512,
            "is_sparse": False,
            "bi_direction": True,
        }
        run_config = {
            "model_save_path": os.path.join(TestDygraphModelsParallelized.test_output_dir, "gru"),
            "best_model_save_path": os.path.join(TestDygraphModelsParallelized.test_output_dir, "gru_best"),
            "epochs": 2,
            "batch_size": 32,
            "max_seq_len": 300,
            "print_step": 200,
            "learning_rate": 5e-4,
        }
        start_time = time.time()

        class GRUModel(ClassificationModel):
            @model_parallelized(TestDygraphModelsParallelized.strategy)
            def build(self, **model_config):
                self.model = GRUClassifier(**model_config)
                self.built = True

        place = F.CUDAPlace(D.ParallelEnv().dev_id)
        with D.guard(place):
            gru_model = GRUModel()
            gru_model.build(**gru_config)
            best_acc = gru_model.train(
                TestDygraphModelsParallelized.train_data, TestDygraphModelsParallelized.eval_data,
                label_encoder=TestDygraphModelsParallelized.label_encoder,
                **run_config)
        logging.warning("gru parallelized best train score: {}, cost time: {}s".format(best_acc, time.time() - start_time))

    def test_lstm_parallelized(self):
        lstm_config = {
            "num_class": TestDygraphModelsParallelized.label_encoder.size(),
            "vocab_size": TestDygraphModelsParallelized.tokenizer.size(),
            "emb_dim": 512,
            "lstm_dim": 256,
            "fc_hid_dim": 512,
            "is_sparse": False,
            "bi_direction": True,
        }
        run_config = {
            "model_save_path": os.path.join(TestDygraphModelsParallelized.test_output_dir, "lstm"),
            "best_model_save_path": os.path.join(TestDygraphModelsParallelized.test_output_dir, "lstm_best"),
            "epochs": 2,
            "batch_size": 32,
            "max_seq_len": 300,
            "print_step": 200,
            "learning_rate": 5e-4,
        }
        start_time = time.time()

        class LSTMModel(ClassificationModel):
            @model_parallelized(TestDygraphModelsParallelized.strategy)
            def build(self, **model_config):
                self.model = DynamicLSTMClassifier(**model_config)
                self.built = True

        place = F.CUDAPlace(D.ParallelEnv().dev_id)
        with D.guard(place):
            lstm_model = LSTMModel()
            lstm_model.build(**lstm_config)
            best_acc = lstm_model.train(
                TestDygraphModelsParallelized.train_data, TestDygraphModelsParallelized.eval_data,
                label_encoder=TestDygraphModelsParallelized.label_encoder,
                **run_config)
        logging.warning("lstm parallelized best train score: {}, cost time: {}s".format(best_acc, time.time() - start_time))

    def test_ernie_parallelized(self):
        ernie_config = {
            "pretrain_dir_or_url": "ernie-1.0",
            "num_labels": TestDygraphModelsParallelized.label_encoder.size(),
        }
        run_config = {
            "model_save_path": os.path.join(TestDygraphModelsParallelized.test_output_dir, "ernie"),
            "best_model_save_path": os.path.join(TestDygraphModelsParallelized.test_output_dir, "ernie_best"),
            "epochs": 2,
            "batch_size": 32,
            "max_seq_len": 300,
            "print_step": 100,
            "learning_rate": 5e-5,
            "load_best_model": False,
        }
        start_time = time.time()

        class ErnieClassificationModel(ClassificationModel):
            @model_parallelized(TestDygraphModelsParallelized.strategy)
            def build(self, **model_config):
                self.model = ErnieSequenceClassificationCustomized.from_pretrained(**model_config)
                self.built = True

        place = F.CUDAPlace(D.ParallelEnv().dev_id)
        with D.guard(place):
            ernie_classification_model = ErnieClassificationModel()
            ernie_classification_model.build(**ernie_config)
            best_acc = ernie_classification_model.train(
                TestDygraphModelsParallelized.train_data, TestDygraphModelsParallelized.eval_data,
                label_encoder=TestDygraphModelsParallelized.label_encoder,
                **run_config)
        logging.warning("ernie parallelized best train score: {}, cost time: {}s".format(best_acc, time.time() - start_time))

if __name__ == "__main__":
    # Run all test cases...


display_chart.py

Source: display_chart.py (GitHub)

import matplotlib.pyplot as plt

def extract_file_column(file_name, column_index):
    column_values = []
    # Open the file in read mode:
    with open("{0}.txt".format(file_name), 'r') as file:
        all_file_lines = file.read().splitlines()
        for each_line in all_file_lines:
            # Split each line into a list holding
            # each of the three values it contains
            data = each_line.split(' ')
            column_values.append(float(data[column_index]))
    return column_values

def extract_file_line(file_name, line_index):
    # Open the file in read mode:
    with open("{0}.txt".format(file_name), 'r') as file:
        all_file_lines = file.read().splitlines()
        line_values = all_file_lines[line_index].split(' ')
    return line_values

def comparison_technics(number_of_threads):
    data_sizes = extract_file_column("recorded durations/{}threads_times_original".format(number_of_threads), 0)
    original_durations = extract_file_column("recorded durations/{}threads_times_original".format(number_of_threads), 1)
    optimized_durations = extract_file_column("recorded durations/{}threads_times_optimized".format(number_of_threads), 1)
    parallelized_durations = extract_file_column("recorded durations/{}threads_times_parallelized".format(number_of_threads), 1)
    optimized_and_parallelized_durations = extract_file_column("recorded durations/{}threads_times_optimized_and_parallelized".format(number_of_threads), 1)
    # Build the chart from the extracted lists:
    plt.plot(data_sizes, original_durations, color="m", label="Original", linewidth=2.0)
    plt.plot(data_sizes, optimized_durations, color="b", label="Optimized", linewidth=2.0)
    plt.plot(data_sizes, parallelized_durations, color="c", label="Parallelized", linewidth=2.0)
    plt.plot(data_sizes, optimized_and_parallelized_durations, color="g", label="Parallelized and Optimized", linewidth=2.0)
    plt.legend(loc='upper left', frameon=False)
    plt.xlabel('Data Size')
    plt.ylabel('Time (s)')
    plt.xlim(100, 1400)
    plt.ylim(0, 0.2)
    plt.title("Execution Time per Technique Applied to the Code ({} Threads)".format(number_of_threads))

    # Show the chart on screen:
    plt.show()

def speedup(line_index):
    data_size = extract_file_line("recorded durations/4threads_times_original", line_index)[0]
    t2_original_duration = extract_file_line("recorded durations/2threads_times_original", line_index)[1]
    t2_optimized_duration = extract_file_line("recorded durations/2threads_times_optimized", line_index)[1]
    t2_parallelized_duration = extract_file_line("recorded durations/2threads_times_parallelized", line_index)[1]
    t2_optimized_and_parallelized_duration = extract_file_line("recorded durations/2threads_times_optimized_and_parallelized", line_index)[1]
    t4_original_duration = extract_file_line("recorded durations/4threads_times_original", line_index)[1]
    t4_optimized_duration = extract_file_line("recorded durations/4threads_times_optimized", line_index)[1]
    t4_parallelized_duration = extract_file_line("recorded durations/4threads_times_parallelized", line_index)[1]
    t4_optimized_and_parallelized_duration = extract_file_line("recorded durations/4threads_times_optimized_and_parallelized", line_index)[1]
    t8_original_duration = extract_file_line("recorded durations/8threads_times_original", line_index)[1]
    t8_optimized_duration = extract_file_line("recorded durations/8threads_times_optimized", line_index)[1]
    t8_parallelized_duration = extract_file_line("recorded durations/8threads_times_parallelized", line_index)[1]
    t8_optimized_and_parallelized_duration = extract_file_line("recorded durations/8threads_times_optimized_and_parallelized", line_index)[1]
    t16_original_duration = extract_file_line("recorded durations/16threads_times_original", line_index)[1]
    t16_optimized_duration = extract_file_line("recorded durations/16threads_times_optimized", line_index)[1]
    t16_parallelized_duration = extract_file_line("recorded durations/16threads_times_parallelized", line_index)[1]
    t16_optimized_and_parallelized_duration = extract_file_line("recorded durations/16threads_times_optimized_and_parallelized", line_index)[1]
    speedup_between_original_and_parallel = [float(t2_original_duration) / float(t2_parallelized_duration),
                                             float(t4_original_duration) / float(t4_parallelized_duration),
                                             float(t8_original_duration) / float(t8_parallelized_duration),
                                             float(t16_original_duration) / float(t16_parallelized_duration)]

    speedup_between_optimizeds = [float(t2_optimized_duration) / float(t2_optimized_and_parallelized_duration),
                                  float(t4_optimized_duration) / float(t4_optimized_and_parallelized_duration),
                                  float(t8_optimized_duration) / float(t8_optimized_and_parallelized_duration),
                                  float(t16_optimized_duration) / float(t16_optimized_and_parallelized_duration)]
    # Build the chart from the extracted lists:
    plt.plot([2, 4, 8, 16], speedup_between_original_and_parallel, color="b", label="Original vs. Parallelized", linewidth=2.0)
    plt.plot([2, 4, 8, 16], speedup_between_optimizeds, color="c", label="Optimized vs. Optimized and Parallelized", linewidth=2.0)
    plt.legend(loc='upper left', frameon=False)
    plt.xlabel('Number of Threads')
    plt.ylabel('Speedup')
    plt.xlim(2, 16)
    plt.ylim(1, 3)
    plt.title("Speedup Gain (Array Size {0}x{0})".format(str(data_size)))

    # Show the chart on screen:
    ...
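The parsing code implies, although the data files themselves are not shown, that each recorded durations/*.txt file holds one space-separated "data size, duration" pair per line. A hypothetical file and the corresponding calls might look like this:

# Assumed layout of e.g. "recorded durations/4threads_times_original.txt":
#   100 0.013
#   200 0.049
#   400 0.102
# With such files in place:
comparison_technics(4)   # execution time vs. data size for the 4-thread runs
speedup(2)               # speedup curves at the data size recorded on line 2

The speedup being plotted is the usual ratio T_original / T_parallelized, computed at 2, 4, 8, and 16 threads, once for the plain version and once for the optimized one.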

ray_test.py

Source: ray_test.py (GitHub)

#!/usr/bin/env python
# -*- coding: UTF-8 -*-

# REF [site] >> https://github.com/ray-project/ray

import time
import ray
import tensorflow as tf  # NOTE: uses the TensorFlow 1.x session API

ray.init()

@ray.remote
def func():
    time.sleep(1)
    return 1

def simple_example():
    results = ray.get([func.remote() for i in range(4)])
    print('Result =', results)

@ray.remote
def sqr(x):
    time.sleep(1)
    return x * x

def parallelized_map(func, args):
    return list(func.remote(arg) for arg in args)

def parallelized_map_example():
    # Call parallelized_map() on a list.
    result_ids = parallelized_map(sqr, range(1, 6))
    # Get the results.
    results = ray.get(result_ids)
    print('Result =', results)

@ray.remote
def negative(x):
    time.sleep(1)
    return x < 0

def parallelized_filter(func, args):
    # BUG FIX: func.remote(arg) returns an ObjectRef, which is always truthy,
    # so filtering on it directly would keep every element. Launch all
    # predicate tasks in parallel, then fetch their results before filtering.
    args = list(args)
    keep_flags = ray.get([func.remote(arg) for arg in args])
    return [arg for arg, keep in zip(args, keep_flags) if keep]

def parallelized_filter_example():
    # Call parallelized_filter() on a list; it already returns plain values.
    results = parallelized_filter(negative, range(-5, 5))
    print('Result =', results)

@ray.remote
class Simulator(object):
    def __init__(self):
        self.sess = tf.Session()
        self.simple_model = tf.constant([1.0])

    def simulate(self):
        return self.sess.run(self.simple_model)

def simple_tensorflow_example():
    # Create two actors.
    simulators = [Simulator.remote() for _ in range(2)]
    # Run two simulations in parallel.
    results = ray.get([s.simulate.remote() for s in simulators])
    print(results)

def main():
    #simple_example()
    parallelized_map_example()
    #parallelized_filter_example()
    #simple_tensorflow_example()

#--------------------------------------------------------------------
if '__main__' == __name__:
    main()
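The Ray idiom driving this snippet: func.remote(arg) returns immediately with an ObjectRef handle while the task runs in the background, and ray.get() blocks until the results are available. That is also the root of the filter bug fixed above, since an ObjectRef is always truthy. A short, self-contained illustration:

import time
import ray

ray.init()

@ray.remote
def slow_double(x):
    time.sleep(1)
    return 2 * x

refs = [slow_double.remote(i) for i in range(4)]  # returns almost instantly: four ObjectRefs
print(ray.get(refs))  # blocks about 1 s total (tasks run in parallel): [0, 2, 4, 6]
print(bool(refs[0]))  # True -- ObjectRefs are always truthy, hence the filter bug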

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Lemoncheesecake automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
