How to use the run_mocked method in responses

Best Python code snippet using responses

test_run.py

Source:test_run.py Github

copy

Full Screen

# -*- coding: utf-8 -*-
"""Tests for conan_sword_and_sorcery.ci.run: the `main` CLI entry point and the
`run` driver, with the real runner replaced by mocks/stubs.

NOTE(review): reconstructed from an extraction-garbled snippet (original line
numbers were fused into the text); indentation restored from Python syntax.
"""
import os
import unittest
import argparse
try:
    # Python 3: mock ships inside unittest.
    from unittest import mock
except ImportError:
    # Python 2 fallback: standalone `mock` package.
    import mock

from conan_sword_and_sorcery.ci.run import main, run
from conan_sword_and_sorcery.ci.compilers.clang import CompilerClangApple
from conan_sword_and_sorcery.ci.runners.base_runner import DRY_RUN, SUCCESS, FAIL
from conan_sword_and_sorcery.utils.environ import context_env
from conan_sword_and_sorcery.utils import backup_file
from tests.utils import parse_remote_list


@mock.patch('argparse.ArgumentParser.parse_args')
class TestMain(unittest.TestCase):
    """Exercise `main()` with mocked argparse results and a mocked `run`."""

    @mock.patch('conan_sword_and_sorcery.ci.run.run', return_value='mocked')
    def test_main_invalid_conanfile(self, run_mocked, argparse_mocked):
        # A conanfile path that does not exist must abort with SystemExit.
        argparse_mocked.return_value = argparse.Namespace(dry_run=True, conanfile='here', verbose_count=0)
        with self.assertRaises(SystemExit):
            main()

    @mock.patch('conan_sword_and_sorcery.ci.run.run', return_value='mocked')
    def test_main(self, run_mocked, argparse_mocked):
        # Happy path: main() forwards to run() and returns its value.
        conanfile = os.path.join(os.path.dirname(__file__), '..', 'files', 'single', 'conanfile01.py')
        argparse_mocked.return_value = argparse.Namespace(dry_run=True, conanfile=conanfile, verbose_count=0, conan_options=None, conan_username=None, conan_channel=None)
        self.assertEqual(main(), "mocked")

    @mock.patch('conan_sword_and_sorcery.ci.run.run')
    def test_main_options(self, run_mocked, argparse_mocked):
        # --conan-options must be exported as CONAN_OPTIONS while run() executes.
        conanfile = os.path.join(os.path.dirname(__file__), '..', 'files', 'single', 'conanfile01.py')
        argparse_mocked.return_value = argparse.Namespace(dry_run=True, conanfile=conanfile, verbose_count=0, conan_options="opt1", conan_username=None, conan_channel=None)

        def check_env(conanfile, dry_run):
            self.assertEqual(dry_run, True)
            self.assertEqual(os.environ['CONAN_OPTIONS'], 'opt1')
            return 'check_env'

        run_mocked.side_effect = check_env
        self.assertEqual(main(), "check_env")

    @mock.patch('conan_sword_and_sorcery.ci.run.run')
    def test_main_username(self, run_mocked, argparse_mocked):
        # --conan-username must be exported as CONAN_USERNAME while run() executes.
        conanfile = os.path.join(os.path.dirname(__file__), '..', 'files', 'single', 'conanfile01.py')
        argparse_mocked.return_value = argparse.Namespace(dry_run=True, conanfile=conanfile, verbose_count=0, conan_options=None,
                                                          conan_username="username", conan_channel=None)

        def check_env(conanfile, dry_run):
            self.assertEqual(dry_run, True)
            self.assertEqual(os.environ['CONAN_USERNAME'], 'username')
            return 'check_env'

        run_mocked.side_effect = check_env
        self.assertEqual(main(), "check_env")

    @mock.patch('conan_sword_and_sorcery.ci.run.run')
    def test_main_channel(self, run_mocked, argparse_mocked):
        # --conan-channel must be exported as CONAN_CHANNEL while run() executes.
        conanfile = os.path.join(os.path.dirname(__file__), '..', 'files', 'single', 'conanfile01.py')
        argparse_mocked.return_value = argparse.Namespace(dry_run=True, conanfile=conanfile, verbose_count=0, conan_options=None,
                                                          conan_username=None, conan_channel="channel")

        def check_env(conanfile, dry_run):
            self.assertEqual(dry_run, True)
            self.assertEqual(os.environ['CONAN_CHANNEL'], 'channel')
            return 'check_env'

        run_mocked.side_effect = check_env
        self.assertEqual(main(), "check_env")


class Runner4Testing:
    """Stub runner returned by the mocked RunnerRegistry.get_runner.

    Returns a fixed status from `run()` and enumerates a fixed pair of
    Clang/Apple jobs (optionally truncated to `n_jobs`).
    """

    def __init__(self, run_return, n_jobs=None):
        self.run_return = run_return  # status code `run()` will report
        self.n_jobs = n_jobs          # optional cap on enumerated jobs

    def enumerate_jobs(self):
        jobs = [(CompilerClangApple(version='4.9', arch='x86', build_type='Release', libcxx='libstdc++11', ), {}),
                (CompilerClangApple(version='4.9', arch='x86_64', build_type='Release', libcxx='libstdc++11', ), {})]
        if self.n_jobs:
            jobs = jobs[:self.n_jobs]
        return jobs

    def is_stable_branch(self):
        return False

    def set_compiler(self, compiler):
        pass

    def set_profile(self, profile_file):
        pass

    def run(self, options, username, channel):
        return self.run_return

    def upload(self, *args, **kwargs):
        pass


class TestRun(unittest.TestCase):
    """Exercise `run()` against the Runner4Testing stub."""

    def run(self, *args, **kwargs):
        # Back up the Conan remote registry around every test so remote
        # manipulation inside run() does not leak between tests.
        with backup_file(os.path.join(os.path.expanduser("~"), '.conan', 'registry.txt')):
            return super(TestRun, self).run(*args, **kwargs)

    def setUp(self):
        self.conanfile = os.path.join(os.path.dirname(__file__), '..', 'files', 'single', 'conanfile01.py')

    @mock.patch('conan_sword_and_sorcery.ci.run.RunnerRegistry.get_runner', return_value=Runner4Testing(run_return=SUCCESS))
    def test_run_success(self, runner_mocked):
        self.assertEqual(run(conanfile=self.conanfile, dry_run=False), 0)

    @mock.patch('conan_sword_and_sorcery.ci.run.RunnerRegistry.get_runner', return_value=Runner4Testing(run_return=FAIL))
    def test_run_fail(self, runner_mocked):
        # Any failing job makes run() report -1.
        self.assertEqual(run(conanfile=self.conanfile, dry_run=False), -1)

    @mock.patch('conan_sword_and_sorcery.ci.run.RunnerRegistry.get_runner', return_value=Runner4Testing(run_return=SUCCESS))
    def test_run_pages(self, runner_mocked):
        # Pagination env vars split the two jobs across two pages.
        with context_env(CONAN_TOTAL_PAGES="2", CONAN_CURRENT_PAGE="1", ):
            self.assertEqual(run(conanfile=self.conanfile, dry_run=False), 0)

    @mock.patch('conan_sword_and_sorcery.ci.run.RunnerRegistry.get_runner', return_value=Runner4Testing(run_return=SUCCESS, n_jobs=1))
    def test_run_pages_invalid(self, runner_mocked):
        # Fewer jobs than pages is an invalid pagination setup.
        with context_env(CONAN_TOTAL_PAGES="2", CONAN_CURRENT_PAGE="1", ):
            with self.assertRaises(AssertionError):
                self.assertEqual(run(conanfile=self.conanfile, dry_run=False), 0)

    @mock.patch('conan_sword_and_sorcery.ci.run.RunnerRegistry.get_runner')
    def test_run_remotes(self, runner_mocked):
        initial_remotes = set([it[1] for it in parse_remote_list()])

        class Runner4TestingEnvCheck(Runner4Testing):
            # `_` instead of `self` so the closure's `self` is the test case.
            def run(_, options, username, channel):
                # CONAN_REMOTES adds two remotes; CONAN_UPLOAD duplicates one
                # of them, so only +2 distinct remotes are expected.
                remotes = set([it[1] for it in parse_remote_list()])
                self.assertEqual(len(initial_remotes) + 2, len(remotes))
                return SUCCESS

        runner_mocked.return_value = Runner4TestingEnvCheck(run_return=SUCCESS)
        with context_env(CONAN_REMOTES="http://remote1.com,http://remote2.com", CONAN_UPLOAD="http://remote1.com"):
            self.assertEqual(run(conanfile=self.conanfile, dry_run=False), 0)

    @mock.patch('conan_sword_and_sorcery.ci.run.RunnerRegistry.get_runner')
    def test_run_remotes_and_upload(self, runner_mocked):
        initial_remotes = set([it[1] for it in parse_remote_list()])

        class Runner4TestingEnvCheck(Runner4Testing):
            def run(_, options, username, channel):
                # Here CONAN_UPLOAD is a third, distinct remote: +2+1 expected.
                remotes = set([it[1] for it in parse_remote_list()])
                self.assertEqual(len(initial_remotes) + 2 + 1, len(remotes))
                return SUCCESS

        runner_mocked.return_value = Runner4TestingEnvCheck(run_return=SUCCESS)
        with context_env(CONAN_REMOTES="http://remote1.com,http://remote2.com", CONAN_UPLOAD="http://remote3.com"):
            # NOTE(review): the scraped snippet is truncated here; presumably
            # the body mirrors test_run_remotes — confirm against the original
            # repository before relying on this placeholder.
            pass  # truncated in source

Full Screen

Full Screen

test_local.py

Source:test_local.py Github

copy

Full Screen

"""Tests for grizzly_cli.local: the `local` dispatcher and `local_run`.

NOTE(review): reconstructed from an extraction-garbled snippet (original line
numbers were fused into the text); indentation restored from Python syntax.
"""
from shutil import rmtree
from os import getcwd, environ
from argparse import ArgumentParser, Namespace

import pytest
from _pytest.tmpdir import TempPathFactory
from pytest_mock import MockerFixture

from grizzly_cli.local import create_parser, local_run, local
from .helpers import onerror

CWD = getcwd()


def test_local(mocker: MockerFixture) -> None:
    # `local` must forward the parsed arguments and the `local_run` callable
    # to `run`, and reject unknown subcommands with ValueError.
    run_mocked = mocker.patch('grizzly_cli.local.run', return_value=0)
    arguments = Namespace(subcommand='run')
    assert local(arguments) == 0
    assert run_mocked.call_count == 1
    args, _ = run_mocked.call_args_list[0]
    assert args[0] is arguments
    assert args[1] is local_run
    arguments = Namespace(subcommand='foo')
    with pytest.raises(ValueError) as ve:
        local(arguments)
    assert 'unknown subcommand foo' == str(ve.value)


def test_local_run(mocker: MockerFixture, tmp_path_factory: TempPathFactory) -> None:
    # `local_run` must export the given environment, assemble the behave
    # command from master/worker/common argument groups, and invoke it once.
    run_command = mocker.patch('grizzly_cli.local.run_command', side_effect=[0])
    test_context = tmp_path_factory.mktemp('test_context')
    (test_context / 'test.feature').write_text('Feature:')
    parser = ArgumentParser()
    sub_parsers = parser.add_subparsers(dest='test')
    create_parser(sub_parsers)
    try:
        assert environ.get('GRIZZLY_TEST_VAR', None) is None
        arguments = parser.parse_args([
            'local', 'run', f'{test_context}/test.feature',
        ])
        assert local_run(
            arguments,
            {
                'GRIZZLY_TEST_VAR': 'True',
            },
            {
                'master': ['--foo', 'bar', '--master'],
                'worker': ['--bar', 'foo', '--worker'],
                'common': ['--common', 'true'],
            },
        ) == 0
        assert run_command.call_count == 1
        args, _ = run_command.call_args_list[-1]
        assert args[0] == [
            'behave',
            f'{test_context}/test.feature',
            '--foo', 'bar', '--master',
            '--bar', 'foo', '--worker',
            '--common', 'true',
        ]
        assert environ.get('GRIZZLY_TEST_VAR', None) == 'True'
    finally:
        rmtree(test_context, onerror=onerror)
        try:
            del environ['GRIZZLY_TEST_VAR']
        except:
            # NOTE(review): the scraped snippet is truncated inside this
            # except clause; a best-effort cleanup swallow (likely KeyError)
            # is assumed — confirm against the original repository.
            pass  # truncated in source

Full Screen

Full Screen

test_create_video.py

Source:test_create_video.py Github

copy

Full Screen

"""Integration tests for apps.videos.views.create_video, with the
VideoCreated event's `run` mocked out.

NOTE(review): reconstructed from an extraction-garbled snippet (original line
numbers were fused into the text); indentation restored from Python syntax.
"""
# Python
from unittest.mock import patch
# Third
import pytest
# Apps
from tests import factories
from apps.videos.views import create_video
from apps.videos.models import Video
from apps.videos.serializers import VideoCreateSerializer


def make_video_data():
    """Build a request-style payload dict for VideoCreateSerializer,
    using factory-built video data plus a persisted genre/category pair."""
    video_data = factories.VideoFactory.build()
    genre = factories.GenreWithCategoryFactory.create()
    category_pk = genre.categories.first().pk
    return {
        "title": video_data.title,
        "description": "some item",
        "categories": [
            category_pk,
        ],
        "genres": [genre.pk.__str__()],
        "year_launched": video_data.year_launched,
        "rating": video_data.rating,
        "duration": video_data.duration,
    }


@pytest.mark.django_db(reset_sequences=True)
@pytest.mark.integration
@patch("apps.videos.events.VideoCreated.run")
def test_create_video_call_tasks(run_mocked):
    # create_video must trigger the VideoCreated event exactly once.
    serializer_class = VideoCreateSerializer
    data = make_video_data()
    serializer = serializer_class(data=data)
    serializer.is_valid()
    instance = create_video(serializer=serializer)
    run_mocked.assert_called_once()


@pytest.mark.django_db(reset_sequences=True)
@pytest.mark.integration
@patch("apps.videos.events.VideoCreated.run")
def test_create_video_returns_an_instance(run_mocked):
    # NOTE(review): the scraped snippet ends here; the visible body only
    # repeats the event assertion — presumably the original also asserted on
    # `instance` (e.g. isinstance(instance, Video)); confirm upstream.
    serializer_class = VideoCreateSerializer
    data = make_video_data()
    serializer = serializer_class(data=data)
    serializer.is_valid()
    instance = create_video(serializer=serializer)
    run_mocked.assert_called_once()

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run responses automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful