How to use the generate_runner method in lisa

Best Python code snippets using lisa_python

generate_layers_tests.py

Source: generate_layers_tests.py (GitHub)


# Copyright 2021 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

# Generates runner variants for the tf keras layers tests.
import generate_runner

# These layers were selected by:
# 1. Getting all subclasses of `tf.keras.layers.Layer`
# 2. Removing deprecated layers based on the tf.keras docs
# 3. Removing irrelevant layers
# 4. Removing layers that don't fit in the testing framework
#    (Wrappers, DenseFeatures, ...)
LAYERS = [
    "Activation",
    "ActivityRegularization",
    "Add",
    "AdditiveAttention",
    "AlphaDropout",
    "Attention",
    "Average",
    "AveragePooling1D",
    "AveragePooling2D",
    "AveragePooling3D",
    "BatchNormalization",
    "Concatenate",
    "Conv1D",
    "Conv1DTranspose",
    "Conv2D",
    "Conv2DTranspose",
    "Conv3D",
    "Conv3DTranspose",
    # "ConvLSTM2D",  # TODO(meadowlark): Debug flakiness.
    "Cropping1D",
    "Cropping2D",
    "Cropping3D",
    "Dense",
    "DepthwiseConv2D",
    "Dot",
    "Dropout",
    "ELU",
    "Embedding",
    "Flatten",
    "GRU",
    "GaussianDropout",
    "GaussianNoise",
    "GlobalAveragePooling1D",
    "GlobalAveragePooling2D",
    "GlobalAveragePooling3D",
    "GlobalMaxPool1D",
    "GlobalMaxPool2D",
    "GlobalMaxPool3D",
    "InputLayer",
    "LSTM",
    "Lambda",
    "LayerNormalization",
    "LeakyReLU",
    "LocallyConnected1D",
    "LocallyConnected2D",
    "Masking",
    "MaxPool1D",
    "MaxPool2D",
    "MaxPool3D",
    "Maximum",
    "Minimum",
    "MultiHeadAttention",
    "Multiply",
    "PReLU",
    "Permute",
    "ReLU",
    "RepeatVector",
    "Reshape",
    "SeparableConv1D",
    "SeparableConv2D",
    # "SimpleRNN",  # TODO(meadowlark): Debug flakiness.
    "Softmax",
    "SpatialDropout1D",
    "SpatialDropout2D",
    "SpatialDropout3D",
    "Subtract",
    "ThresholdedReLU",
    "UpSampling1D",
    "UpSampling2D",
    "UpSampling3D",
    "ZeroPadding1D",
    "ZeroPadding2D",
    "ZeroPadding3D",
]

# A list of all layers with non-default api tests can be generated by running:
#   bazel run integrations/tensorflow/e2e/keras/layers:layers_test_manual -- \
#     --list_layers_with_full_api_tests
LAYERS_WITH_FULL_API_TESTS = [
    "ActivityRegularization",
    "AdditiveAttention",
    "Attention",
    "AveragePooling1D",
    "AveragePooling2D",
    "AveragePooling3D",
    "BatchNormalization",
    "Concatenate",
    "Conv1D",
    "Conv1DTranspose",
    "Conv2D",
    "Conv2DTranspose",
    "Conv3D",
    "Conv3DTranspose",
    # "ConvLSTM2D",  # TODO(meadowlark): Debug flakiness.
    "Cropping1D",
    "Cropping2D",
    "Cropping3D",
    "DepthwiseConv2D",
    "GRU",
    "LSTM",
    "LocallyConnected1D",
    "LocallyConnected2D",
    "MaxPool1D",
    "MaxPool2D",
    "MaxPool3D",
    "SeparableConv1D",
    "SeparableConv2D",
    # "SimpleRNN",  # TODO(meadowlark): Debug flakiness.
]

# Layers that mention a training kwarg in their doc.
LAYERS_WITH_TRAINING_BEHAVIOR = [
    "AdditiveAttention",
    "AlphaDropout",
    "Attention",
    "BatchNormalization",
    # "ConvLSTM2D",  # TODO(meadowlark): Debug flakiness.
    "Dropout",
    "GRU",
    "GaussianDropout",
    "GaussianNoise",
    "LSTM",
    "MultiHeadAttention",
    # "SimpleRNN",  # TODO(meadowlark): Debug flakiness.
    "SpatialDropout1D",
    "SpatialDropout2D",
    "SpatialDropout3D",
]

BACKENDS = [
    ("llvmaot", "--target_backends=iree_llvmaot"),
    ("vulkan", "--target_backends=iree_vulkan"),
]

for variant, flags in BACKENDS:
    for layer in LAYERS:
        # Static dims.
        generate_runner.main([
            variant,
            (f"{flags} --dynamic_dims=false --training=false "
             f"--test_default_kwargs_only=true --layer={layer} --artifacts_dir=%t"),
            f"iree_tf_tests/layers/layers_test.py:{layer}"
        ])
        # Dynamic dims.
        generate_runner.main([
            variant,
            (f"{flags} --dynamic_dims=true --training=false "
             f"--test_default_kwargs_only=true --layer={layer} --artifacts_dir=%t"),
            f"iree_tf_tests/layers/layers_test.py:dynamic_dims_{layer}"
        ])
    # Test with test_default_kwargs_only=false.
    for layer in LAYERS_WITH_FULL_API_TESTS:
        generate_runner.main([
            variant,
            (f"{flags} --dynamic_dims=false --training=false "
             f"--test_default_kwargs_only=false --layer={layer} --artifacts_dir=%t"),
            f"iree_tf_tests/layers/layers_test.py:full_api_{layer}"
        ])
    # Test with training flags.
    for layer in LAYERS_WITH_TRAINING_BEHAVIOR:
        generate_runner.main([
            variant,
            (f"{flags} --dynamic_dims=false --training=true "
             f"--test_default_kwargs_only=true --layer={layer} --artifacts_dir=%t"),
            f"iree_tf_tests/layers/layers_test.py:training_{layer}"
            # ... (truncated in the source)
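Everything in this script funnels through generate_runner.main, which takes a three-element list: the backend variant name, one flag string for the test (with %t left as a placeholder for the harness to replace with a scratch directory), and a path/to/test.py:test_name spec naming the generated runner. The generate_runner module itself is not shown on this page, so the following is only a minimal sketch of what such an entry point could look like; the runner-file naming scheme and output layout are assumptions for illustration, not the IREE implementation.

import os

def main(args):
    """Hypothetical stand-in for generate_runner.main([variant, flags, test_spec])."""
    variant, flags, test_spec = args
    module_path, test_name = test_spec.split(":")
    # Derive a unique runner name from the test name and backend variant
    # (naming scheme assumed for illustration).
    runner_dir = os.path.join("generated_runners", os.path.dirname(module_path))
    os.makedirs(runner_dir, exist_ok=True)
    runner_path = os.path.join(runner_dir, f"{test_name}__{variant}.run")
    module = module_path[:-3].replace("/", ".")  # strip ".py", make importable
    with open(runner_path, "w") as f:
        # "%t" is deliberately left in the flags for the test harness to substitute.
        f.write(f"# RUN: %PYTHON -m {module} {flags}\n")

if __name__ == "__main__":
    main([
        "llvmaot",
        ("--target_backends=iree_llvmaot --dynamic_dims=false --training=false "
         "--test_default_kwargs_only=true --layer=Dense --artifacts_dir=%t"),
        "iree_tf_tests/layers/layers_test.py:Dense",
    ])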


generate_math_tests.py

Source: generate_math_tests.py (GitHub)


# Copyright 2021 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

# Generates runner variants for the tf math tests.
import generate_runner

TF_MATH_FUNCTIONS = [
    "abs",
    "accumulate_n",
    "acos",
    "acosh",
    "add",
    "add_n",
    "angle",
    "argmax",
    "argmin",
    "asin",
    "asinh",
    "atan",
    "atan2",
    "atanh",
    "bessel_i0",
    "bessel_i0e",
    "bessel_i1",
    "bessel_i1e",
    "betainc",
    "bincount",
    "ceil",
    "confusion_matrix",
    "cos",
    "cosh",
    "count_nonzero",
    "cumprod",
    "cumsum",
    "cumulative_logsumexp",
    "digamma",
    "divide",
    "divide_no_nan",
    "equal",
    "erf",
    "erfc",
    "erfinv",
    "exp",
    "expm1",
    "floor",
    "floordiv",
    "floormod",
    "greater",
    "greater_equal",
    "igamma",
    "igammac",
    "imag",
    "in_top_k",
    "invert_permutation",
    "is_finite",
    "is_inf",
    "is_nan",
    "is_non_decreasing",
    "is_strictly_increasing",
    "lbeta",
    "less",
    "less_equal",
    "lgamma",
    "log",
    "log1p",
    "log_sigmoid",
    "log_softmax",
    "logical_and",
    "logical_not",
    "logical_or",
    "logical_xor",
    "maximum",
    "minimum",
    "mod",
    "multiply",
    "multiply_no_nan",
    "ndtri",
    "negative",
    "nextafter",
    "not_equal",
    "polygamma",
    "polyval",
    "pow",
    "real",
    "reciprocal",
    "reciprocal_no_nan",
    "reduce_all",
    "reduce_any",
    "reduce_euclidean_norm",
    "reduce_logsumexp",
    "reduce_max",
    "reduce_mean",
    "reduce_min",
    "reduce_prod",
    "reduce_std",
    "reduce_sum",
    "reduce_variance",
    "rint",
    "round",
    "rsqrt",
    "scalar_mul",
    "segment_max",
    "segment_mean",
    "segment_min",
    "segment_prod",
    "segment_sum",
    "sigmoid",
    "sign",
    "sin",
    "sinh",
    "sobol_sample",
    "softmax",
    "softplus",
    "softsign",
    "sqrt",
    "square",
    "squared_difference",
    "subtract",
    "tan",
    "tanh",
    "top_k",
    "truediv",
    "unsorted_segment_max",
    "unsorted_segment_mean",
    "unsorted_segment_min",
    "unsorted_segment_prod",
    "unsorted_segment_sqrt_n",
    "unsorted_segment_sum",
    "xdivy",
    "xlog1py",
    "xlogy",
    "zero_fraction",
    "zeta",
]

# This list was generated by running:
#   bazel run integrations/tensorflow/e2e/math:math_test_manual -- \
#     --list_functions_with_complex_tests
# keep sorted
COMPLEX_FUNCTIONS = [
    "abs",
    "add",
    "angle",
    "asinh",
    "atanh",
    "conj",
    "cos",
    "cosh",
    "count_nonzero",
    "cumprod",
    "cumsum",
    "divide",
    "divide_no_nan",
    "exp",
    "expm1",
    "imag",
    "l2_normalize",
    "log",
    "log1p",
    "multiply",
    "multiply_no_nan",
    "negative",
    "pow",
    "real",
    "reciprocal",
    "reciprocal_no_nan",
    "reduce_euclidean_norm",
    "reduce_std",
    "reduce_variance",
    "rsqrt",
    "sigmoid",
    "sign",
    "sin",
    "sinh",
    "sqrt",
    "square",
    "squared_difference",
    "subtract",
    "tan",
    "tanh",
    "truediv",
    "xdivy",
    "xlog1py",
    "xlogy",
    "zero_fraction",
]

BACKENDS = [
    ("llvmaot", "--target_backends=iree_llvmaot"),
    ("vulkan", "--target_backends=iree_vulkan"),
]

# Non-dynamic dim tests.
for variant, flags in BACKENDS:
    for math_fn in TF_MATH_FUNCTIONS:
        generate_runner.main([
            variant,
            f"{flags} --dynamic_dims=false --functions={math_fn} --artifacts_dir=%t",
            f"iree_tf_tests/math/math_test.py:{math_fn}"
        ])
    for math_fn in COMPLEX_FUNCTIONS:
        generate_runner.main([
            variant,
            f"{flags} --dynamic_dims=false --functions={math_fn} --artifacts_dir=%t",
            f"iree_tf_tests/math/math_test.py:complex_{math_fn}"
        ])

# Dynamic dim tests.
for variant, flags in BACKENDS:
    for math_fn in TF_MATH_FUNCTIONS:
        generate_runner.main([
            variant,
            f"{flags} --dynamic_dims=true --functions={math_fn} --artifacts_dir=%t",
            f"iree_tf_tests/math/math_test.py:dynamic_dim_{math_fn}"
        ])
    for math_fn in COMPLEX_FUNCTIONS:
        generate_runner.main([
            variant,
            f"{flags} --dynamic_dims=true --functions={math_fn} --artifacts_dir=%t",
            f"iree_tf_tests/math/math_test.py:complex_dynamic_dim_{math_fn}"
            # ... (truncated in the source)


test_python_based_fit.py

Source: test_python_based_fit.py (GitHub)


'''
A Python implementation of the fitting algorithm, using scipy's optimization libraries.
'''
from os.path import abspath
import os

import numpy as np
from scipy import optimize

from dplus.CalculationInput import CalculationInput
from dplus.CalculationRunner import EmbeddedLocalRunner
from tests.old_stuff.fix_state_files import fix_file

root_path = os.path.dirname(abspath(__file__))


def close_enough(x1, x2):
    return abs(x1 - x2) < 0.01


def run_fit(input):
    generate_runner = EmbeddedLocalRunner()

    def run_generate(xdata, *params):
        '''
        scipy's optimization algorithms require a function that receives an x array and an
        array of parameters, and returns a y array. This function will be called repeatedly
        until scipy's optimization has completed.
        '''
        # Take the parameters given by scipy and place them inside our parameter tree.
        input.set_mutable_parameter_values(params)
        generate_results = generate_runner.generate(input)  # call generate
        return np.array(generate_results.y)  # return the results of the generate call

    x_data = input.x
    y_data = input.y
    p0 = input.get_mutable_parameter_values()
    method = 'lm'  # Levenberg-Marquardt (see scipy documentation)
    popt, pcov = optimize.curve_fit(run_generate, x_data, y_data, p0=p0, method=method)
    # popt is the optimized set of parameters from those we have indicated as mutable.
    # We can insert them into our original input and run generate to get the results with them.
    input.set_mutable_parameter_values(popt)
    best_results = generate_runner.generate(input)
    return input, best_results


def test_fit_1():
    state_file = os.path.join(root_path, "files", "2_pops.state")
    fixed_state_file = fix_file(state_file)
    input = CalculationInput.load_from_state_file(fixed_state_file)
    result_input, result = run_fit(input)
    muts = result_input.get_mutable_params()
    assert close_enough(muts[0][0].value, 1)
    assert close_enough(muts[1][0].value, 3)


def test_fit_2():
    from dplus.State import State
    from dplus.DataModels.models import Sphere
    input = CalculationInput()
    s = Sphere()
    s.use_grid = True
    s.layer_params[1]["radius"].value = 2.0
    s.layer_params[1]["radius"].mutable = True
    input.Domain.populations[0].add_model(s)
    signal_file = os.path.join(root_path, "files", "sphere.out")
    input.DomainPreferences.signal_file = signal_file
    input.DomainPreferences.use_grid = True
    input.DomainPreferences.grid_size = 200
    input.DomainPreferences.orientation_iterations = 10000
    input.use_gpu = True
    result_input, result = run_fit(input)
    assert close_enough(s.layer_params[1]["radius"].value, 1)


def test_fit_manual():
    input = CalculationInput.load_from_state_file(
        os.path.join(root_path, "files", "2_pops_fixed.state"))
    generate_runner = EmbeddedLocalRunner()

    def run_generate(xdata, *params):
        '''Same wrapper as in run_fit above: push scipy's parameters into the
        parameter tree, run generate, and return the resulting y array.'''
        input.set_mutable_parameter_values(params)
        generate_results = generate_runner.generate(input)
        return np.array(generate_results.y)

    x_data = input.x
    y_data = input.y
    p0 = input.get_mutable_parameter_values()
    method = 'lm'  # Levenberg-Marquardt (see scipy documentation)
    popt, pcov = optimize.curve_fit(run_generate, x_data, y_data, p0=p0, method=method)
    # popt is the optimized set of parameters from those we have indicated as mutable;
    # insert them back into our CalculationInput to create the optimized parameter tree.
    input.set_mutable_parameter_values(popt)
    # we can run generate to get the results of generate with them...
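The reusable idea in this file is the closure handed to scipy.optimize.curve_fit: scipy only ever sees a function of (xdata, *params), while the closure pushes the parameters into the D+ parameter tree and runs a full generate call per evaluation. The same shape works for any expensive black-box model. Below is a self-contained toy version with a plain numpy function standing in for EmbeddedLocalRunner.generate, so it runs without the dplus package or its state files.

import numpy as np
from scipy import optimize

def model(x, amplitude, decay):
    # Stand-in for the generate() call: any function of x and the mutable parameters.
    return amplitude * np.exp(-decay * x)

# Synthetic "measured" data with a little noise.
x_data = np.linspace(0.0, 4.0, 50)
rng = np.random.default_rng(0)
y_data = model(x_data, 2.5, 1.3) + 0.01 * rng.standard_normal(x_data.size)

p0 = [1.0, 1.0]  # initial guesses, analogous to get_mutable_parameter_values()
popt, pcov = optimize.curve_fit(model, x_data, y_data, p0=p0, method="lm")
print(popt)  # recovers values close to (2.5, 1.3)

Note that method='lm' (Levenberg-Marquardt) requires at least as many data points as parameters and does not support parameter bounds; the D+ fits above inherit the same constraints.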


