How to use the raises_exception method in avocado

Best Python code snippets using avocado_python
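The snippets on this page come from different open-source projects; each one defines its own raises_exception helper or parametrized flag rather than calling a single built-in API. As a minimal sketch of the underlying pattern (all names below are illustrative, not part of avocado or of the quoted projects), such a helper runs a callable and fails the test unless the expected exception is raised:

def raises_exception(func, exception, *args, **kwargs):
    """Run `func` and fail unless it raises `exception` (illustrative helper)."""
    try:
        func(*args, **kwargs)
    except exception:
        return  # the expected exception was raised
    raise AssertionError(f"{func.__name__} did not raise {exception.__name__}")

def divide(a, b):
    return a / b

# Passes silently: dividing by zero raises ZeroDivisionError.
raises_exception(divide, ZeroDivisionError, 1, 0)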

test_train_utils.py

Source: test_train_utils.py (GitHub)


from typing import Any, Dict, List
import numpy as np
import pytest
from typing import Text
import rasa.utils.train_utils as train_utils
from rasa.nlu.constants import NUMBER_OF_SUB_TOKENS
from rasa.nlu.tokenizers.tokenizer import Token
from rasa.shared.nlu.constants import (
    SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
    SPLIT_ENTITIES_BY_COMMA,
)
from rasa.utils.tensorflow.constants import (
    MODEL_CONFIDENCE,
    RANKING_LENGTH,
    RENORMALIZE_CONFIDENCES,
    SIMILARITY_TYPE,
    LOSS_TYPE,
    COSINE,
    SOFTMAX,
    INNER,
    CROSS_ENTROPY,
    MARGIN,
    AUTO,
    TOLERANCE,
    CHECKPOINT_MODEL,
    EVAL_NUM_EPOCHS,
    EVAL_NUM_EXAMPLES,
    EPOCHS,
)
from rasa.shared.exceptions import InvalidConfigException

def test_align_token_features():
    tokens = [
        Token("This", 0, data={NUMBER_OF_SUB_TOKENS: 1}),
        Token("is", 5, data={NUMBER_OF_SUB_TOKENS: 1}),
        Token("a", 8, data={NUMBER_OF_SUB_TOKENS: 1}),
        Token("sentence", 10, data={NUMBER_OF_SUB_TOKENS: 2}),
        Token("embedding", 19, data={NUMBER_OF_SUB_TOKENS: 4}),
    ]
    seq_dim = sum(t.get(NUMBER_OF_SUB_TOKENS) for t in tokens)
    token_features = np.random.rand(1, seq_dim, 64)
    actual_features = train_utils.align_token_features([tokens], token_features)
    assert np.all(actual_features[0][0] == token_features[0][0])
    assert np.all(actual_features[0][1] == token_features[0][1])
    assert np.all(actual_features[0][2] == token_features[0][2])
    # sentence is split into 2 sub-tokens
    assert np.all(actual_features[0][3] == np.mean(token_features[0][3:5], axis=0))
    # embedding is split into 4 sub-tokens
    assert np.all(actual_features[0][4] == np.mean(token_features[0][5:10], axis=0))

@pytest.mark.parametrize(
    (
        "input_values, ranking_length, renormalize, possible_output_values, "
        "resulting_ranking_length"
    ),
    [
        # keep the top 2
        ([0.1, 0.4, 0.01], 2, False, [[0.1, 0.4, 0.0]], 2),
        # normalize top 2
        ([0.1, 0.4, 0.01], 2, True, [[0.2, 0.8, 0.0]], 2),
        # 2 possible values that could be excluded
        ([0.1, 0.4, 0.1], 2, True, [[0.0, 0.8, 0.2], [0.2, 0.8, 0.0]], 2),
        # ranking_length > num_confidences => ranking_length := num_confidences
        ([0.1, 0.3, 0.2], 5, False, [[0.1, 0.3, 0.2]], 3),
        # ranking_length > num_confidences => ranking_length := num_confidences
        ([0.1, 0.3, 0.1], 5, True, [[0.1, 0.3, 0.1]], 3),
        # ranking_length == 0 => ranking_length := num_confidences
        ([0.1, 0.3, 0.1], 0, True, [[0.1, 0.3, 0.1]], 3),
    ],
)
def test_rank_and_mask(
    input_values: List[float],
    ranking_length: int,
    possible_output_values: List[List[float]],
    renormalize: bool,
    resulting_ranking_length: int,
):
    confidences = np.array(input_values)
    indices, modified_confidences = train_utils.rank_and_mask(
        confidences=confidences, ranking_length=ranking_length, renormalize=renormalize
    )
    assert any(
        np.allclose(modified_confidences, np.array(possible_output))
        for possible_output in possible_output_values
    )
    assert np.allclose(
        sorted(input_values, reverse=True)[:resulting_ranking_length],
        confidences[indices],
    )

@pytest.mark.parametrize(
    "split_entities_config, expected_initialized_config",
    [
        (
            SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
            {SPLIT_ENTITIES_BY_COMMA: SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE},
        ),
        (
            {"address": False, "ingredients": True},
            {
                "address": False,
                "ingredients": True,
                SPLIT_ENTITIES_BY_COMMA: SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
            },
        ),
    ],
)
def test_init_split_entities_config(
    split_entities_config: Any, expected_initialized_config: Dict[Text, bool]
):
    assert (
        train_utils.init_split_entities(
            split_entities_config, SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE
        )
        == expected_initialized_config
    )

@pytest.mark.parametrize(
    "component_config, raises_exception",
    [
        ({MODEL_CONFIDENCE: SOFTMAX, LOSS_TYPE: MARGIN}, True),
        ({MODEL_CONFIDENCE: SOFTMAX, LOSS_TYPE: CROSS_ENTROPY}, False),
        ({MODEL_CONFIDENCE: INNER, LOSS_TYPE: MARGIN}, True),
        ({MODEL_CONFIDENCE: INNER, LOSS_TYPE: CROSS_ENTROPY}, True),
        ({MODEL_CONFIDENCE: COSINE, LOSS_TYPE: MARGIN}, True),
        ({MODEL_CONFIDENCE: COSINE, LOSS_TYPE: CROSS_ENTROPY}, True),
    ],
)
def test_confidence_loss_settings(
    component_config: Dict[Text, Any], raises_exception: bool
):
    component_config[SIMILARITY_TYPE] = INNER
    if raises_exception:
        with pytest.raises(InvalidConfigException):
            train_utils._check_confidence_setting(component_config)
    else:
        train_utils._check_confidence_setting(component_config)

@pytest.mark.parametrize(
    "component_config, raises_exception",
    [
        ({MODEL_CONFIDENCE: SOFTMAX, SIMILARITY_TYPE: INNER}, False),
        ({MODEL_CONFIDENCE: SOFTMAX, SIMILARITY_TYPE: COSINE}, True),
    ],
)
def test_confidence_similarity_settings(
    component_config: Dict[Text, Any], raises_exception: bool
):
    component_config[LOSS_TYPE] = CROSS_ENTROPY
    if raises_exception:
        with pytest.raises(InvalidConfigException):
            train_utils._check_confidence_setting(component_config)
    else:
        train_utils._check_confidence_setting(component_config)

@pytest.mark.parametrize(
    "component_config, raises_exception",
    [
        (
            {
                MODEL_CONFIDENCE: SOFTMAX,
                SIMILARITY_TYPE: INNER,
                RENORMALIZE_CONFIDENCES: True,
                RANKING_LENGTH: 10,
            },
            False,
        ),
        (
            {
                MODEL_CONFIDENCE: SOFTMAX,
                SIMILARITY_TYPE: INNER,
                RENORMALIZE_CONFIDENCES: False,
                RANKING_LENGTH: 10,
            },
            False,
        ),
        (
            {
                MODEL_CONFIDENCE: AUTO,
                SIMILARITY_TYPE: INNER,
                RENORMALIZE_CONFIDENCES: True,
                RANKING_LENGTH: 10,
            },
            True,
        ),
        (
            {
                MODEL_CONFIDENCE: AUTO,
                SIMILARITY_TYPE: INNER,
                RENORMALIZE_CONFIDENCES: False,
                RANKING_LENGTH: 10,
            },
            False,
        ),
    ],
)
def test_confidence_renormalization_settings(
    component_config: Dict[Text, Any], raises_exception: bool
):
    component_config[LOSS_TYPE] = CROSS_ENTROPY
    if raises_exception:
        with pytest.raises(InvalidConfigException):
            train_utils._check_confidence_setting(component_config)
    else:
        train_utils._check_confidence_setting(component_config)

@pytest.mark.parametrize(
    "component_config, model_confidence",
    [
        ({MODEL_CONFIDENCE: SOFTMAX, LOSS_TYPE: MARGIN}, AUTO),
        ({MODEL_CONFIDENCE: SOFTMAX, LOSS_TYPE: CROSS_ENTROPY}, SOFTMAX),
    ],
)
def test_update_confidence_type(
    component_config: Dict[Text, Text], model_confidence: Text
):
    component_config = train_utils.update_confidence_type(component_config)
    assert component_config[MODEL_CONFIDENCE] == model_confidence

@pytest.mark.parametrize(
    "component_config, raises_exception",
    [
        ({TOLERANCE: 0.5}, False),
        ({TOLERANCE: 0.0}, False),
        ({TOLERANCE: 1.0}, False),
        ({TOLERANCE: -1.0}, True),
        ({TOLERANCE: 2.0}, True),
        ({}, False),
    ],
)
def test_tolerance_setting(component_config: Dict[Text, float], raises_exception: bool):
    if raises_exception:
        with pytest.raises(InvalidConfigException):
            train_utils._check_tolerance_setting(component_config)
    else:
        train_utils._check_tolerance_setting(component_config)

@pytest.mark.parametrize(
    "component_config",
    [
        (
            {
                CHECKPOINT_MODEL: True,
                EVAL_NUM_EPOCHS: -2,
                EVAL_NUM_EXAMPLES: 10,
                EPOCHS: 5,
            }
        ),
        (
            {
                CHECKPOINT_MODEL: True,
                EVAL_NUM_EPOCHS: 0,
                EVAL_NUM_EXAMPLES: 10,
                EPOCHS: 5,
            }
        ),
    ],
)
def test_warning_incorrect_eval_num_epochs(component_config: Dict[Text, Text]):
    with pytest.warns(UserWarning) as record:
        train_utils._check_evaluation_setting(component_config)
    assert len(record) == 1
    assert (
        f"'{EVAL_NUM_EPOCHS}' is not -1 or greater than 0. Training will fail"
        in record[0].message.args[0]
    )

@pytest.mark.parametrize(
    "component_config",
    [
        ({CHECKPOINT_MODEL: True, EVAL_NUM_EPOCHS: 10, EPOCHS: 5}),
        ({CHECKPOINT_MODEL: False, EVAL_NUM_EPOCHS: 10, EPOCHS: 5}),
    ],
)
def test_warning_eval_num_epochs_greater_than_epochs(
    component_config: Dict[Text, Text]
):
    warning = (
        f"'{EVAL_NUM_EPOCHS}={component_config[EVAL_NUM_EPOCHS]}' is "
        f"greater than '{EPOCHS}={component_config[EPOCHS]}'."
        f" No evaluation will occur."
    )
    with pytest.warns(UserWarning) as record:
        train_utils._check_evaluation_setting(component_config)
    assert len(record) == 1
    if component_config[CHECKPOINT_MODEL]:
        warning = (
            f"You have opted to save the best model, but {warning} "
            "No checkpoint model will be saved."
        )
    assert warning in record[0].message.args[0]

@pytest.mark.parametrize(
    "component_config",
    [
        ({CHECKPOINT_MODEL: True, EVAL_NUM_EPOCHS: 1, EVAL_NUM_EXAMPLES: 0, EPOCHS: 5}),
        (
            {
                CHECKPOINT_MODEL: True,
                EVAL_NUM_EPOCHS: 1,
                EVAL_NUM_EXAMPLES: -1,
                EPOCHS: 5,
            }
        ),
    ],
)
def test_warning_incorrect_eval_num_examples(component_config: Dict[Text, Text]):
    with pytest.warns(UserWarning) as record:
        train_utils._check_evaluation_setting(component_config)
    assert len(record) == 1
    assert (
        f"'{EVAL_NUM_EXAMPLES}' is not greater than 0. No checkpoint model "
        f"will be saved"
    ) in record[0].message.args[0]

@pytest.mark.parametrize(
    "component_config",
    [
        (
            {
                CHECKPOINT_MODEL: False,
                EVAL_NUM_EPOCHS: 0,
                EVAL_NUM_EXAMPLES: 0,
                EPOCHS: 5,
            }
        ),
        (
            {
                CHECKPOINT_MODEL: True,
                EVAL_NUM_EPOCHS: 1,
                EVAL_NUM_EXAMPLES: 10,
                EPOCHS: 5,
            }
        ),
    ],
)
def test_no_warning_correct_checkpoint_setting(component_config: Dict[Text, Text]):
    with pytest.warns(None) as record:
        train_utils._check_evaluation_setting(component_config)
...


tests.py

Source: tests.py (GitHub)


...
        # Backup original environment variable
        address_predefined = 'DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ
        old_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS')
        # Just the host is not accepted
        cls.raises_exception('localhost', ImproperlyConfigured)
        # The host must be valid
        cls.raises_exception('blahblahblah:8081', WSGIServerException)
        # The list of ports must be in a valid format
        cls.raises_exception('localhost:8081,', ImproperlyConfigured)
        cls.raises_exception('localhost:8081,blah', ImproperlyConfigured)
        cls.raises_exception('localhost:8081-', ImproperlyConfigured)
        cls.raises_exception('localhost:8081-blah', ImproperlyConfigured)
        cls.raises_exception('localhost:8081-8082-8083', ImproperlyConfigured)
        # If contrib.staticfiles isn't configured properly, the exception
        # should bubble up to the main thread.
        old_STATIC_URL = TEST_SETTINGS['STATIC_URL']
        TEST_SETTINGS['STATIC_URL'] = None
        cls.raises_exception('localhost:8081', ImproperlyConfigured)
        TEST_SETTINGS['STATIC_URL'] = old_STATIC_URL
        # Restore original environment variable
        if address_predefined:
            os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = old_address
        else:
            del os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS']

    @classmethod
    def raises_exception(cls, address, exception):
        os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = address
        try:
            super(LiveServerAddress, cls).setUpClass()
            raise Exception("The line above should have raised an exception")
        except exception:
            pass
        finally:
            super(LiveServerAddress, cls).tearDownClass()

    def test_test_test(self):
        # Intentionally empty method so that the test is picked up by the
        # test runner and the overridden setUpClass() method is executed.
        pass

class LiveServerViews(LiveServerBase):
    def test_404(self):
...
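Here raises_exception is a hand-rolled classmethod: it sets the environment variable, expects setUpClass() to fail with the given exception, raises a plain Exception to flag the "no error occurred" case, and always tears down in finally. With pytest the same check collapses to a context manager; a hedged sketch of an equivalent helper (only the DJANGO_LIVE_TEST_SERVER_ADDRESS name is taken from the snippet, start_server is a hypothetical stand-in for LiveServerTestCase.setUpClass, and ValueError stands in for ImproperlyConfigured):

import os
import pytest

def raises_exception(address, exception, start_server):
    # Point the live server at `address`, then expect startup to fail.
    os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = address
    with pytest.raises(exception):
        start_server()

def test_invalid_address():
    def start_server():
        # Hypothetical server bootstrap that rejects a port-less address.
        raise ValueError("just the host is not accepted")
    raises_exception('localhost', ValueError, start_server)

pytest.raises already fails the test when no exception is raised, which is exactly what the manual raise Exception(...) line emulates in the Django version.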


test_host_alibaba.py

Source: test_host_alibaba.py (GitHub)


# coding=utf-8
"""apyfal.host.aws tests"""
import json
import pytest
from tests.test_host_csp import run_full_real_test_sequence, import_from_generic_test

def test_alibibaclass_import():
    """AlibabaHost import"""
    # Test: Import by factory without errors
    import_from_generic_test('Alibaba')

def test_alibibaclass_request():
    """AlibabaHost._request"""
    import apyfal.host.alibaba as alibaba
    from apyfal.host.alibaba import AlibabaCSP
    import apyfal.exceptions as exc
    from aliyunsdkcore import client
    from aliyunsdkcore.acs_exception.exceptions import ServerException

    # Mocks some variables
    client_id = 'dummy_access_key'
    secret_id = 'dummy_secret_key'
    region = 'dummy_region_id'
    action = 'DummyAction'
    parameters = {'DummyString': 'dummy_value',
                  'DummyNumber': 0,
                  'DummyList': ['dummy_value']}
    response = {'DummyResponse': 0}
    raises_exception = []
    status_desc = 'testing'

    # Mocks client
    class DummyAcsClient:
        """Mocked AcsClient"""

        def __init__(self, ak, secret, region_id):
            """Checks parameters"""
            assert ak == client_id
            assert secret == secret_id
            assert region_id == region

        @staticmethod
        def do_action_with_exception(acs_request):
            """Checks parameters, returns a fake response and
            raises exceptions"""
            # Checks request
            assert acs_request.get_action_name() == action
            acs_request_params = acs_request.get_query_params()
            for param in parameters:
                assert param in acs_request_params
                assert isinstance(acs_request_params[param], str)
            assert 'ClientToken' in acs_request_params
            assert acs_request.get_protocol_type() == "https"
            # Raises fake exceptions
            if raises_exception:
                raise ServerException(*raises_exception)
            # Returns fake response
            return json.dumps(response)

    client_acs_client = client.AcsClient
    client.AcsClient = DummyAcsClient
    alibaba._AcsClient = DummyAcsClient

    # Tests
    try:
        csp = AlibabaCSP(client_id=client_id, secret_id=secret_id, region=region)
        # Everything OK
        assert csp._request(action, **parameters) == response
        # Raise exception
        raises_exception = ['DummyCode', 'dummy_message']
        with pytest.raises(exc.HostRuntimeException) as exc_info:
            csp._request(action, **parameters)
        for part in raises_exception:
            assert part in exc_info
        raises_exception[0] = 'InvalidParameter'
        with pytest.raises(exc.HostConfigurationException):
            csp._request(action, **parameters)
        raises_exception[0] = 'InvalidAccessKey'
        with pytest.raises(exc.HostAuthenticationException):
            csp._request(action, **parameters)
        # Filter codes
        raises_exception[0] = 'DummyCode'
        with pytest.raises(ServerException):
            csp._request(action, error_code_filter='DummyCode', **parameters)
        assert csp._request(
            action, error_code_ignore='DummyCode', **parameters) is None
        # Test "_instance_request"
        raises_exception = []
        assert csp._instance_request(action, **parameters) == response
        # Tests "_instance_request" timeout if instance with incorrect status
        raises_exception = ['IncorrectInstanceStatus', 'dummy_message']
        parameters['InstanceId'] = 'dummy_instance_id'
        csp.TIMEOUT = 0.0
        with pytest.raises(exc.HostRuntimeException) as exc_info:
            csp._instance_request(action, status_desc=status_desc, **parameters)
        assert status_desc in exc_info
        # Tests "_instance_request" still throws other exceptions
        raises_exception[0] = 'DummyCode'
        with pytest.raises(exc.HostRuntimeException) as exc_info:
            csp._instance_request(action, status_desc=status_desc, **parameters)
        for part in raises_exception:
            assert part in exc_info
    # Restore AcsClient
    finally:
        client.AcsClient = client_acs_client
        alibaba._AcsClient = client_acs_client

@pytest.mark.need_csp
@pytest.mark.need_csp_alibaba
def test_alibabaclass_real():
    """AlibabaHost in real case"""
    run_full_real_test_sequence('Alibaba', {
        'cn-hangzhou': {
            # Image name: Debian 8.9 64bit / 20Go HDD
            'image': 'debian_8_09_64_20G_alibase_20170824.vhd',
            'instancetype': 'ecs.t5-lc2m1.nano',
...
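In this test, raises_exception is not a method at all but a list shared between the test body and the mocked AcsClient through a closure: an empty list makes the mock succeed, and a non-empty list supplies the arguments for the ServerException it raises, which the test then rebinds between scenarios. A minimal sketch of that toggle pattern (all names here are illustrative, not apyfal or SDK APIs):

import pytest

class DummyError(Exception):
    """Stands in for the SDK's ServerException (illustrative)."""

def make_client():
    # `raises_exception` is shared with the mock via closure: an empty
    # list means "succeed", a non-empty list holds the exception args.
    raises_exception = []

    def do_action():
        if raises_exception:
            raise DummyError(*raises_exception)
        return "ok"

    return raises_exception, do_action

def test_toggle():
    raises_exception, do_action = make_client()
    assert do_action() == "ok"       # empty list: success path
    raises_exception.append("boom")  # now the mock raises
    with pytest.raises(DummyError):
        do_action()

Because the mock reads the flag at call time, one mock instance can drive the success path, several error codes, and the retry/timeout path without being rebuilt.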


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. From setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run avocado automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
