How to use implementation method in hypothesis

Best Python code snippet using hypothesis

test_command_stack_commands___init__.py

Source:test_command_stack_commands___init__.py Github

copy

Full Screen

1from stack.commands import Command, Implementation, DatabaseConnection2from stack.argument_processors.scope import ScopeArgProcessor3from stack.exception import CommandError4from unittest.mock import patch, create_autospec, ANY, MagicMock5from concurrent.futures import Future6from collections import namedtuple7import time8import pytest9class CommandUnderTest(Command):10 """A subclass of Command that replaces __init__ to remove the database dependency."""11 def __init__(self, *args, **kwargs):12 # needed for implementation running and loading functions13 self.impl_list = {}14 # needed for anything that messes with the DB15 self.db = create_autospec(16 spec = DatabaseConnection,17 instance = True,18 )19 self.db.database = MagicMock()20 self.level = 021class TestCommand:22 @patch.object(Command, 'loadImplementation', autospec = True)23 @patch('stack.commands.ThreadPoolExecutor', autospec = True)24 def test_run_implementations_parallel(self, mock_executor, mock_loadImplementation):25 """Test the normal case where all implementations are loadable and run to completion."""26 def load_impl(self, name):27 """Side effect for mock_loadImplementation to set the impl_list"""28 self.impl_list[name] = "_"29 test_command = CommandUnderTest()30 # set the side effect of loadImplementation to set impl list so that one is loaded.31 # The value won't actually be used because we mock the ThreadPoolExecutor.32 mock_loadImplementation.side_effect = load_impl33 # create mock futures specced off of the actual Future class.34 mock_future1 = create_autospec(spec = Future, spec_set = True, instance = True)35 mock_future1.exception.return_value = None36 mock_future1.result.return_value = "Look ma, I'm threading!"37 mock_future2 = create_autospec(spec = Future, spec_set = True)38 mock_future2.exception.return_value = Exception("And that's when Jimmy burst into flames.")39 mock_future2.result.return_value = None40 # Since the ThreadPoolExecutor is used as a context manager, we need this long 
chain to41 # set the return value of the submit call.42 mock_executor.return_value.__enter__.return_value.submit.side_effect = (mock_future1, mock_future2)43 test_implementation_mapping = {"foo": "bar", "baz": "bog"}44 result = test_command.run_implementations_parallel(45 implementation_mapping = test_implementation_mapping46 )47 assert result == {48 "foo": namedtuple("_", ("result", "exception"))(49 result = mock_future1.result(),50 exception = mock_future1.exception(),51 ),52 "baz": namedtuple("_", ("result", "exception"))(53 result = mock_future2.result(),54 exception = mock_future2.exception(),55 )56 }57 for key, value in test_implementation_mapping.items():58 mock_loadImplementation.assert_any_call(test_command, name = key)59 mock_executor.return_value.__enter__.return_value.submit.assert_any_call(60 ANY,61 name = key,62 args = value,63 )64 @patch.object(Command, 'loadImplementation', autospec = True)65 def test_run_implementations_parallel_real_threads(self, mock_loadImplementation):66 """Test the normal case with real threads, but mocking the implementation run function.67 This is used to ensure the callable submitted is actually going to run the implementation.68 """69 def load_impl(self, name):70 """Side effect for mock_loadImplementation to set the impl_list"""71 mock_imp = create_autospec(spec = Implementation, spec_set = True, instance = True)72 mock_imp.run.return_value = "Look ma, I'm threading!"73 self.impl_list[name] = mock_imp74 test_command = CommandUnderTest()75 # set the side effect of loadImplementation to set impl list so that one is loaded.76 # The value won't actually be used because we mock the ThreadPoolExecutor.77 mock_loadImplementation.side_effect = load_impl78 test_implementation_mapping = {"foo": "bar", "baz": "bog"}79 result = test_command.run_implementations_parallel(80 implementation_mapping = test_implementation_mapping81 )82 assert result == {83 key: namedtuple("_", ("result", "exception"))(84 result = "Look ma, I'm 
threading!",85 exception = None,86 )87 for key in test_implementation_mapping88 }89 @patch.object(Command, 'loadImplementation', autospec = True)90 def test_run_implementations_parallel_real_threads_real_errors(self, mock_loadImplementation):91 """Test the normal case with real threads, but mocking the implementation run function.92 This is used to ensure the callable submitted is actually going to run the implementation.93 """94 def load_impl(self, name):95 """Side effect for mock_loadImplementation to set the impl_list"""96 mock_imp = create_autospec(spec = Implementation, spec_set = True, instance = True)97 mock_imp.run.side_effect = Exception("And that's when Jimmy burst into flames.")98 self.impl_list[name] = mock_imp99 test_command = CommandUnderTest()100 # set the side effect of loadImplementation to set impl list so that one is loaded.101 # The value won't actually be used because we mock the ThreadPoolExecutor.102 mock_loadImplementation.side_effect = load_impl103 test_implementation_mapping = {"foo": "bar", "baz": "bog"}104 result = test_command.run_implementations_parallel(105 implementation_mapping = test_implementation_mapping106 )107 # exception class equality doesn't work the way we want here :(108 assert all(key in result for key in ("foo", "baz"))109 test_exception = Exception("And that's when Jimmy burst into flames.")110 for value in result.values():111 assert value.result is None112 assert type(value.exception) is type(test_exception)113 assert value.exception.args == test_exception.args114 @patch.object(Command, 'loadImplementation', autospec = True)115 @patch('stack.commands.ThreadPoolExecutor', autospec = True)116 def test_run_implementations_parallel_empty_args(self, mock_executor, mock_loadImplementation):117 """Test that passing a empty implementation mapping causes no actions to occur."""118 test_command = CommandUnderTest()119 result = test_command.run_implementations_parallel(implementation_mapping = {})120 assert result == {}121 
mock_executor.return_value.__enter__.return_value.assert_not_called()122 mock_loadImplementation.assert_not_called()123 @patch.object(Command, 'loadImplementation', autospec = True)124 @patch('stack.commands.ThreadPoolExecutor', autospec = True)125 def test_run_implementations_parallel_not_loadable(self, mock_executor, mock_loadImplementation):126 """Test that when an implementation is not loadable, the result is None for that imp name."""127 test_command = CommandUnderTest()128 test_implementation_mapping = {"foo": "bar", "baz": "bog"}129 result = test_command.run_implementations_parallel(implementation_mapping = test_implementation_mapping)130 assert result == {key: None for key in test_implementation_mapping}131 mock_executor.return_value.__enter__.return_value.assert_not_called()132 @patch.object(Command, 'loadImplementation', autospec = True)133 @patch('stack.commands.ThreadPoolExecutor', autospec = True)134 def test_run_implementations_parallel_partial_loadable(self, mock_executor, mock_loadImplementation):135 """Test being able to load some implementations but not all."""136 def load_impl(self, name):137 if name == "baz":138 self.impl_list = {name: "_"}139 test_command = CommandUnderTest()140 # set the side effect of loadImplementation to set impl list so that one is loaded.141 # The value won't actually be used because we mock the ThreadPoolExecutor.142 mock_loadImplementation.side_effect = load_impl143 # create a mock future specced off of the actual Future class.144 mock_future = create_autospec(spec = Future, spec_set = True)145 mock_future.exception.return_value = None146 mock_future.result.return_value = None147 # Since the ThreadPoolExecutor is used as a context manager, we need this long chain to148 # set the return value of the submit call.149 mock_executor.return_value.__enter__.return_value.submit.return_value = mock_future150 test_implementation_mapping = {"foo": "bar", "baz": "bog"}151 result = test_command.run_implementations_parallel(152 
implementation_mapping = test_implementation_mapping153 )154 assert result == {155 "foo": None,156 "baz": namedtuple("_", ("result", "exception"))(157 result = None,158 exception = None,159 )160 }161 # should only be one implementation submitted162 mock_executor.return_value.__enter__.return_value.submit.assert_called_once_with(163 ANY,164 name = "baz",165 args = "bog",166 )167 @patch.object(Command, 'loadImplementation', autospec = True)168 @patch('stack.commands.ThreadPoolExecutor', autospec = True)169 def test_run_implementations_parallel_already_loaded(self, mock_executor, mock_loadImplementation):170 """Tests that the already loaded implementations are used."""171 test_implementation_mapping = {"foo": "bar", "baz": "bog"}172 test_command = CommandUnderTest()173 test_command.impl_list = {key: "_" for key in test_implementation_mapping}174 # create a mock future specced off of the actual Future class.175 mock_future = create_autospec(spec = Future, spec_set = True)176 mock_future.exception.return_value = None177 mock_future.result.return_value = None178 # Since the ThreadPoolExecutor is used as a context manager, we need this long chain to179 # set the return value of the submit call.180 mock_executor.return_value.__enter__.return_value.submit.return_value = mock_future181 result = test_command.run_implementations_parallel(182 implementation_mapping = test_implementation_mapping183 )184 assert result == {185 key: namedtuple("_", ("result", "exception"))(186 result = mock_future.result(),187 exception = mock_future.exception(),188 )189 for key in test_implementation_mapping190 }191 # should not try to load any implementations192 mock_loadImplementation.assert_not_called()193 @patch.object(Command, 'loadImplementation', autospec = True)194 @patch('stack.commands.ThreadPoolExecutor', autospec = True)195 def test_run_implementations_parallel_output_enabled_fast_implementations(self, mock_executor, mock_loadImplementation):196 """Coverage case for the 
display_progress option where the future finishes before the timer ends."""197 test_implementation_mapping = {"foo": "bar", "baz": "bog"}198 test_command = CommandUnderTest()199 test_command.impl_list = {key: "_" for key in test_implementation_mapping}200 # create a mock future specced off of the actual Future class.201 mock_future = create_autospec(spec = Future, spec_set = True)202 mock_future.exception.return_value = None203 mock_future.result.return_value = None204 mock_future.done.return_value = True205 # Since the ThreadPoolExecutor is used as a context manager, we need this long chain to206 # set the return value of the submit call.207 mock_executor.return_value.__enter__.return_value.submit.return_value = mock_future208 result = test_command.run_implementations_parallel(209 implementation_mapping = test_implementation_mapping,210 display_progress = True211 )212 assert result == {213 key: namedtuple("_", ("result", "exception"))(214 result = mock_future.result(),215 exception = mock_future.exception(),216 )217 for key in test_implementation_mapping218 }219 @patch.object(Command, 'loadImplementation', autospec = True)220 def test_run_implementations_parallel_output_enabled_slow_implementations(self, mock_loadImplementation):221 """Coverage case for the display_progress option where the future is still not done after the timer expires."""222 test_implementation_mapping = {"foo": "bar", "baz": "bog"}223 test_command = CommandUnderTest()224 # create dummy imps where the side_effect of calling run is to sleep.225 mock_imp1 = create_autospec(spec = Implementation, spec_set = True, instance = True)226 mock_imp1.run.side_effect = lambda args: time.sleep(4)227 mock_imp2 = create_autospec(spec = Implementation, spec_set = True, instance = True)228 mock_imp2.run.side_effect = lambda args: time.sleep(5)229 gen = (impl for impl in (mock_imp1, mock_imp2))230 test_command.impl_list = {231 key: next(gen) for key in test_implementation_mapping232 }233 result = 
test_command.run_implementations_parallel(234 implementation_mapping = test_implementation_mapping,235 display_progress = True236 )237 assert result == {238 key: namedtuple("_", ("result", "exception"))(239 result = None,240 exception = None,241 )242 for key in test_implementation_mapping243 }244 # To get this to work we're essentially overriding the global __import__ and eval functions245 # in the stack.commands module with mock objects. Mocking the builtins directly doesn't seem246 # to work for eval.247 @patch(target = "stack.commands.__import__", create = True)248 @patch(target = "stack.commands.eval", create = True)249 def test_command_command_error_exception_handling(self, mock_eval, mock__import__):250 """Test that the CommandError raised contains the command run and the lower tier exception information."""251 # Set the mock's side effect for when runWrapper is called to raise a CommandError.252 mock_eval.return_value.Command.return_value.runWrapper.side_effect = CommandError(253 cmd = create_autospec(spec = Command, instance = True),254 msg = "test error",255 )256 test_command = CommandUnderTest()257 with pytest.raises(CommandError) as exception_info:258 test_command.command(command = "foo.bar.baz", args = ["a", "b=c"])259 # make sure the command is listed as well as its arguments260 assert "foo bar baz a b=c" in exception_info.value.message()261 # make sure the CommandError's message is passed along as well262 assert "test error" in exception_info.value.message()263 # To get this to work we're essentially overriding the global __import__ and eval functions264 # in the stack.commands module with mock objects. 
Mocking the builtins directly doesn't seem265 # to work for eval.266 @patch(target = "stack.commands.__import__", create = True)267 @patch(target = "stack.commands.eval", create = True)268 def test_command_command_error_exception_handling_verbose_off(self, mock_eval, mock__import__):269 """Test that the CommandError raised does not contain the command run when verbose errors are turned off."""270 # Set the mock's side effect for when runWrapper is called to raise a CommandError.271 mock_eval.return_value.Command.return_value.runWrapper.side_effect = CommandError(272 cmd = create_autospec(spec = Command, instance = True),273 msg = "test error",274 )275 test_command = CommandUnderTest()276 with pytest.raises(CommandError) as exception_info:277 test_command.command(command = "foo.bar.baz", args = ["a", "b=c"], verbose_errors = False)278 # make sure the command is not listed279 assert "foo bar baz a b=c" not in exception_info.value.message()280 # make sure the CommandError's message is passed along as well281 assert "test error" in exception_info.value.message()282 @pytest.mark.parametrize("verbose_errors", (True, False))283 # To get this to work we're essentially overriding the global __import__ and eval functions284 # in the stack.commands module with mock objects. Mocking the builtins directly doesn't seem285 # to work for eval.286 @patch(target = "stack.commands.__import__", create = True)287 @patch(target = "stack.commands.eval", create = True)288 def test_command_exception_handling(self, mock_eval, mock__import__, verbose_errors):289 """Test that non-CommandErrors cause a RuntimeError to be raised that contains the command run.290 This should happen regardless of whether verbose_errors were turned off or not.291 """292 # The getattr is used to return the Command class in the eval'd module and293 # construct the Command instance. 
Return a mock instead294 mock_eval.return_value.Command = MagicMock()295 # Set the mock's side effect for when runWrapper is called to raise a CommandError.296 mock_eval.return_value.Command.return_value.runWrapper.side_effect = ValueError(297 "test error",298 )299 test_command = CommandUnderTest()300 with pytest.raises(RuntimeError) as exception_info:301 test_command.command(command = "foo.bar.baz", args = ["a", "b=c"], verbose_errors = verbose_errors)302 # make sure the command is listed as well as its arguments303 assert "foo bar baz a b=c" in str(exception_info.value)304class TestScopeArgProcessor:305 """Test case for the ScopeArgProcessor"""306 def test_getScopeMappings_global_scope(self):307 """Test that getting the scope mappings works as expected for the global scope."""308 test_scope = "global"309 result = ScopeArgProcessor().getScopeMappings(scope = test_scope)310 assert [(test_scope, None, None, None, None)] == result311 def test_getScopeMappings_global_scope_with_args(self):312 """Test that getting the scope mappings for the global scope fails when additional args are passed."""313 test_scope = "global"314 test_args = ["foo", "bar"]315 with pytest.raises(CommandError):...

Full Screen

Full Screen

help.py

Source:help.py Github

copy

Full Screen

...16 cryptography = None17else:18 import OpenSSL19 import cryptography20def _implementation():21 """Return a dict with the Python implementation and version.22 Provide both the name and the version of the Python implementation23 currently running. For example, on CPython 2.7.5 it will return24 {'name': 'CPython', 'version': '2.7.5'}.25 This function works best on CPython and PyPy: in particular, it probably26 doesn't work for Jython or IronPython. Future investigation should be done27 to work out the correct shape of the code for those platforms.28 """29 implementation = platform.python_implementation()30 if implementation == 'CPython':31 implementation_version = platform.python_version()32 elif implementation == 'PyPy':33 implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,34 sys.pypy_version_info.minor,35 sys.pypy_version_info.micro)36 if sys.pypy_version_info.releaselevel != 'final':37 implementation_version = ''.join([38 implementation_version, sys.pypy_version_info.releaselevel39 ])40 elif implementation == 'Jython':41 implementation_version = platform.python_version() # Complete Guess42 elif implementation == 'IronPython':43 implementation_version = platform.python_version() # Complete Guess44 else:45 implementation_version = 'Unknown'46 return {'name': implementation, 'version': implementation_version}47def info():48 """Generate information for a bug report."""49 try:50 platform_info = {51 'system': platform.system(),52 'release': platform.release(),53 }54 except IOError:55 platform_info = {56 'system': 'Unknown',57 'release': 'Unknown',58 }59 implementation_info = _implementation()60 urllib3_info = {'version': urllib3.__version__}61 chardet_info = {'version': chardet.__version__}62 pyopenssl_info = {63 'version': None,64 'openssl_version': '',65 }66 if OpenSSL:67 pyopenssl_info = {68 'version': OpenSSL.__version__,69 'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,70 }71 cryptography_info = {72 'version': getattr(cryptography, 
'__version__', ''),73 }...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run hypothesis automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful