How to use the assert_between method in Testify

Best Python code snippet using Testify_python

test_stacking_models.py

Source:test_stacking_models.py Github

copy

Full Screen

...89 fit_2gaussian(vels, twogauss_spectrum)90 actual_params = [0.5, 0.0, 0.05, 0.5, 0.3]91 for fit, act, err in zip(parvals, actual_params, parerrs):92 npt.assert_allclose(act, fit, atol=2 * err)93def assert_between(act, val, low_err, up_err):94 assert (act >= val - low_err) & (act <= val + up_err)95def test_hwhm_model():96 vels = np.linspace(-1, 1, 100)97 model, gauss, lor = \98 gauss_with_wings_noise(vels, 1., 0.0, 0.1, 0.0, return_models=True)99 noisy_model = \100 gauss_with_wings_noise(vels, 1., 0.0, 0.1, 0.01)101 parvals, parerrs, param_names, hwhm_gauss = \102 fit_hwhm(vels, noisy_model, sigma_noise=0.01,103 niters=100)104 parvals_clean, parerrs_clean = \105 fit_hwhm(vels, model, sigma_noise=0.01,106 niters=100)[:2]107 f_wings = np.sum((model - gauss)) / np.sum(model)108 sigma_wings = np.sqrt(np.sum([(m - g) * v**2 for m, g, v in109 zip(model, gauss, vels)]) /110 np.sum((model - gauss)))111 asymm = 0.0112 kappa = 0.0113 actual_params = [0.1, 0.0, f_wings, sigma_wings, asymm, kappa]114 # Compare the clean parameter estimates115 for act, val, low, up in zip(actual_params, parvals_clean, *parerrs_clean):116 assert_between(act, val, low, up)117 # Compare the noisy parameter estimates118 for act, val, low, up in zip(actual_params, parvals, *parerrs):119 assert_between(act, val, low, up)120def test_hwhm_model_wgauss():121 vels = np.linspace(-1, 1, 100)122 model, gauss = \123 gauss_with_noise(vels, 1., 0.0, 0.1, 0.0, return_models=True)124 noisy_model = \125 gauss_with_noise(vels, 1., 0.0, 0.1, 0.01)126 parvals, parerrs, param_names, hwhm_gauss = \127 fit_hwhm(vels, noisy_model, sigma_noise=0.01,128 niters=100)129 parvals_clean, parerrs_clean = \130 fit_hwhm(vels, model, sigma_noise=0.01,131 niters=100)[:2]132 f_wings = 0.0133 sigma_wings = 0.0134 asymm = 0.0135 kappa = 0.0136 actual_params = [0.1, 0.0, f_wings, sigma_wings, asymm, kappa]137 names = ['sigma', 'vcen', 'fwing', 'sig_wing', 'asymm', 'kappa']138 # Compare the clean parameter estimates139 for name, act, 
val, low, up in zip(names, actual_params, parvals_clean,140 *parerrs_clean):141 if name == 'sig_wing':142 continue143 assert_between(act, val, low, up)144 # Compare the noisy parameter estimates145 for name, act, val, low, up in zip(names, actual_params, parvals,146 *parerrs):147 if name == 'sig_wing':148 continue149 assert_between(act, val, low, up)150def test_hwhm_model_asymmwings():151 vels = np.linspace(-1, 1, 100)152 model, gauss, lor = \153 gauss_with_wings_noise(vels, 1., 0.0, 0.1, 0.0, return_models=True,154 asymm=-0.85)155 noisy_model = \156 gauss_with_wings_noise(vels, 1., 0.0, 0.1, 0.01,157 asymm=-0.85)158 parvals, parerrs, param_names, hwhm_gauss = \159 fit_hwhm(vels, noisy_model, sigma_noise=0.01,160 niters=100)161 parvals_clean, parerrs_clean = \162 fit_hwhm(vels, model, sigma_noise=0.01,163 niters=100)[:2]164 high_mask = vels > 0.0 + 0.1 * np.sqrt(2 * np.log(2))165 low_mask = vels < 0.0 - 0.1 * np.sqrt(2 * np.log(2))166 # peak_high_mask = np.logical_and(vels > 0.,167 # vels < 0.0 + 0.1 * np.sqrt(2 * np.log(2)))168 # peak_low_mask = np.logical_and(vels < 0.,169 # vels > 0.0 - 0.1 * np.sqrt(2 * np.log(2)))170 f_wings = (np.sum(model[low_mask] - gauss[low_mask]) +171 np.sum(model[high_mask] - gauss[high_mask])) / np.sum(model)172 sigma_wings = np.NaN173 pos_mask = vels > 0174 neg_mask = vels < 0175 asymm = np.sum(model[neg_mask] - model[pos_mask]) / np.sum(model)176 kappa = 0.0177 actual_params = [0.1, 0.0, f_wings, sigma_wings, asymm, kappa]178 names = ['sigma', 'vcen', 'fwing', 'sig_wing', 'asymm', 'kappa']179 # Not comparing the sigma_wings b/c we aren't using it in any analysis180 # and the definition doesn't seem well-suited to substantial asymmetry181 # Compare the clean parameter estimates182 for name, act, val, low, up in zip(names, actual_params, parvals_clean,183 *parerrs_clean):184 if name == 'sig_wing':185 continue186 assert_between(act, val, low, up)187 # Compare the noisy parameter estimates188 for name, act, val, low, up in zip(names, 
actual_params, parvals,189 *parerrs):190 if name == 'sig_wing':191 continue192 assert_between(act, val, low, up)193def test_symm_asymm_fwing():194 '''195 Use above asymmetric setup.196 '''197 vels = np.linspace(-1, 1, 100)198 model, gauss, lor = \199 gauss_with_wings_noise(vels, 1., 0.0, 0.1, 0.0, return_models=True,200 asymm=-0.85)201 noisy_model = \202 gauss_with_wings_noise(vels, 1., 0.0, 0.1, 0.01,203 asymm=-0.85)204 s_model = model205 n_model = model[::-1]206 tot_model = n_model + s_model207 params_clean = find_linewing_asymm(vels, n_model, s_model)208 s_model_noisy = noisy_model209 n_model_noisy = noisy_model[::-1]210 params, low_lim, up_lim = \211 find_linewing_asymm(vels, n_model_noisy, s_model_noisy,212 niters=100,213 sigma_noise_n=0.01,214 sigma_noise_s=0.01)215 high_mask = vels > 0.0 + 0.1 * np.sqrt(2 * np.log(2))216 low_mask = vels < 0.0 - 0.1 * np.sqrt(2 * np.log(2))217 # We added the model twice, so multiple gauss by 2218 f_wings = (np.sum(tot_model[low_mask] - 2 * gauss[low_mask]) +219 np.sum(tot_model[high_mask] - 2 * gauss[high_mask])) / \220 np.sum(tot_model)221 blue_excess = np.sum(s_model[low_mask] - n_model[low_mask])222 red_excess = np.sum(n_model[high_mask] - s_model[high_mask])223 f_asymm = (blue_excess + red_excess) / np.sum(tot_model)224 f_symm = f_wings - f_asymm225 npt.assert_allclose(f_wings, params_clean[0], atol=0.005)226 assert_between(f_wings, params[0], low_lim[0], up_lim[0])227 npt.assert_allclose(f_symm, params_clean[1], atol=0.005)228 assert_between(f_symm, params[1], low_lim[1], up_lim[1])229 npt.assert_allclose(f_asymm, params_clean[2], atol=0.005)230 # Errors seem to be a bit more asymmetric for f_asymm. Allow231 # a larger range232 avg_sig = 0.5 * ((params[2] - low_lim[2]) + (up_lim[2] - params[2]))233 assert_between(f_asymm, params[2], low_lim[2] - 0.5 * avg_sig,...

Full Screen

Full Screen

test_pca.py

Source:test_pca.py Github

copy

Full Screen

# NOTE(review): this excerpt begins mid-file. The tail of a preceding test
# (original lines 26-39) is truncated in the source and kept as a comment.
# assert_between here takes (value, lower, upper) — a 3-argument range
# check declared elsewhere in this file (not visible in the excerpt):
#     slice_used = slice(0, tester.n_eigs)
#     npt.assert_allclose(tester.eigvals[slice_used],
#                         computed_data['pca_val'][slice_used])
#     fit_values = computed_data["pca_fit_vals"].reshape(-1)[0]
#     assert_between(fit_values["index"], tester.index_error_range[0],
#                    tester.index_error_range[1])
#     assert_between(fit_values["gamma"], tester.gamma_error_range[0],
#                    tester.gamma_error_range[1])
#     assert_between(fit_values["intercept"].value,
#                    tester.intercept_error_range[0].value,
#                    tester.intercept_error_range[1].value)
#     assert_between(fit_values["sonic_length"].value,
#                    tester.sonic_length()[1][0].value,
#                    tester.sonic_length()[1][1].value)


@pytest.mark.skipif("not EMCEE_INSTALLED")
def test_PCA_method_w_bayes():
    """Run PCA with the Bayesian fit method and check the fitted index,
    gamma, intercept, and sonic length fall inside their saved error ranges.

    Skipped when the optional emcee dependency is unavailable.
    """
    tester = PCA(dataset1["cube"])
    tester.run(mean_sub=True, n_eigs=50,
               spatial_method='contour',
               spectral_method='walk-down',
               fit_method='bayes', brunt_beamcorrect=False)
    slice_used = slice(0, tester.n_eigs)
    # Eigenvalues must match the saved regression data.
    npt.assert_allclose(tester.eigvals[slice_used],
                        computed_data['pca_val'][slice_used])
    fit_values = computed_data["pca_fit_vals"].reshape(-1)[0]
    assert_between(fit_values["index_bayes"], tester.index_error_range[0],
                   tester.index_error_range[1])
    assert_between(fit_values["gamma_bayes"], tester.gamma_error_range[0],
                   tester.gamma_error_range[1])
    assert_between(fit_values["intercept_bayes"].value,
                   tester.intercept_error_range[0].value,
                   tester.intercept_error_range[1].value)
    assert_between(fit_values["sonic_length_bayes"].value,
                   tester.sonic_length()[1][0].value,
                   tester.sonic_length()[1][1].value)


@pytest.mark.parametrize(("method", "min_eigval"),
                         [("proportion", 0.99), ("value", 0.001)])
def test_PCA_auto_n_eigs(method, min_eigval):
    """Check automatic eigenvalue-count selection against saved values for
    both the 'proportion' and 'value' cut methods."""
    tester = PCA(dataset1["cube"])
    tester.run(mean_sub=True, n_eigs='auto', min_eigval=min_eigval,
               eigen_cut_method=method, decomp_only=True)
    fit_values = computed_data["pca_fit_vals"].reshape(-1)[0]
    assert tester.n_eigs == fit_values["n_eigs_" + method]


def test_PCA_distance():
    """Compute the PCA distance metric between the two test datasets."""
    tester_dist = \
        PCA_Distance(dataset1["cube"],
                     dataset2["cube"]).distance_metric()
    # NOTE(review): the remainder of this test is truncated ("...") in the
    # source excerpt.

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you be proficient with different test automation frameworks, e.g., Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run Testify automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful