How to use the post_tests method in avocado

Best Python code snippet using avocado_python

question.py

Source: question.py (GitHub)

...
            del self._pre_tests
        except AttributeError:
            pass

    @property
    def post_tests(self):
        try:
            return self._post_tests
        except AttributeError:
            if self.post_tests_source:
                post_tests = parse_iospec(self.post_tests_source)
            else:
                post_tests = IoSpec()
            self._post_tests = ejudge.combine_iospec(
                self.pre_tests, post_tests)
            return self._post_tests

    @post_tests.setter
    def post_tests(self, value):
        pre_tests = self.pre_tests
        value = IoSpec([test for test in value if test not in pre_tests])
        self._post_tests = ejudge.combine_iospec(self.pre_tests, value)
        self.post_tests_source = value.source()

    @post_tests.deleter
    def post_tests(self):
        try:
            del self._post_tests
        except AttributeError:
            pass

    submission_class = CodingIoSubmission

    def load_post_file_data(self, file_data):
        fake_post = super().load_post_file_data(file_data)
        fake_post['pre_tests_source'] = self.pre_tests_source
        fake_post['post_tests_source'] = self.post_tests_source
        return fake_post

    # Expanding and controlling the tests state
    def has_test_state_changed(self):
        """
        Return True if test state has changed.
        """
        return self.test_state_hash == compute_test_state_hash(self)

    def get_current_test_state(self, update=False):
        """
        Return a current TestState object synchronized with the current
        pre and post tests.
        It raises a ValidationError if an error is encountered during the
        recreation of the test state.
        """
        if update:
            hash = compute_test_state_hash(self)
        else:
            hash = self.test_state_hash

        try:
            return TestState.objects.get(question=self, hash=hash)
        except TestState.DoesNotExist:
            pre_tests = self.pre_tests
            post_tests = self.post_tests

            def expand(x):
                result = expand_tests(self, x)
                check_expansions_with_all_programs(self, result)
                return result

            pre_source = expand(pre_tests).source()
            post_source = expand(post_tests).source()
            return TestState.objects.create(
                question=self,
                hash=hash,
                pre_tests_source=self.pre_tests_source,
                post_tests_source=self.post_tests_source,
                pre_tests_source_expansion=pre_source,
                post_tests_source_expansion=post_source,
            )

    def get_expanded_pre_tests(self):
        """
        Return an IoSpec object with the result of pre tests expansions.
        """
        state = self.get_current_test_state()
        source = state.pre_tests_source_expansion
        return parse_iospec(source)

    def get_expand_post_tests(self):
        """
        Return an IoSpec object with the result of post tests expansions.
        """
        state = self.get_current_test_state()
        source = state.post_tests_source_expansion
        return parse_iospec(source)

    def __expand_tests_to_source(self, tests):
        """
        Return the source of a iospec object full expansion.
        Similar to .expand_tests(), but return a string with the source code
        expansion.
        """
        if tests is None:
            return ''
        return self._expand_tests(tests)

    # Code runners
    def check_with_code(self, source, tests, language=None, timeout=None):
        """
        Wrapped version of check_with_code() that uses question's own timeout
        and language as default.
        """
        language = get_programming_language(language or self.language)
        timeout = timeout or self.timeout
        ejudge.check_with_code(source, tests, language, timeout)

    def run_code(self, source, tests, language=None, timeout=None):
        """
        Wrapped version of run_code() that uses question's own timeout
        and language as default.
        """
        language = get_programming_language(language or self.language)
        timeout = timeout or self.timeout
        return ejudge.run_code(source, tests, language, timeout)

    def grade_code(self, source, inputs, language=None, timeout=None):
        """
        Wrapped version of grade_code() that uses question's own timeout
        and language as default.
        """
        language = get_programming_language(language or self.language)
        timeout = timeout or self.timeout
        return ejudge.grade_code(source, inputs, language, timeout)

    def expand_from_code(self, source, inputs, language=None, timeout=None):
        """
        Wrapped version of expand_from_code() that uses question's own timeout
        and language as default.
        """
        language = get_programming_language(language or self.language)
        timeout = timeout or self.timeout
        return ejudge.expand_from_code(source, inputs, language, timeout)

    # Saving & validation
    def save(self, *args, **kwargs):
        self.test_state_hash = compute_test_state_hash(self)
        if not self.author_name and self.owner:
            name = self.owner.get_full_name() or self.owner.username
            email = self.owner.email
            self.author_name = '%s <%s>' % (name, email)
        super().save(*args, **kwargs)

    def clean(self):
        super().clean()
        if self.has_test_state_changed() or self.has_code_changed():
            logger.debug('%r: recomputing tests' % self.title)
            self.schedule_validation()

    def full_clean(self, *args, **kwargs):
        if self.__answers:
            self.answers = self.__answers
        super().full_clean(*args, **kwargs)

    def full_clean_expansions(self):
        self.get_current_test_state(update=True)

    def full_clean_answer_keys(self):
        """
        Performs a full_clean() validation step on all answer key objects.
        """
        for key in self.answers.all():
            try:
                key.question = self
                key.full_clean()
            except ValidationError as ex:
                raise validators.invalid_related_answer_key_error(key, ex)

    def full_clean_all(self, *args, **kwargs):
        self.full_clean(*args, **kwargs)
        self.full_clean_answer_keys()
        self.full_clean_expansions()

    def schedule_validation(self):
        """
        Schedule full validation to be performed in the background.
        This executes the full_clean_code() method
        """
        print('scheduling full code validation... (we are now executing on the'
              'foreground).')
        self.mark_invalid_code_fields()

    def mark_invalid_code_fields(self):
        """
        Performs a full code validation with .full_clean_code() and marks all
        errors found in the question.
        """
        return
        try:
            self.full_clean(force_expansions=True)
        except ValidationError as ex:
            print(ex)
            print(dir(ex))
            raise

    def validate_tests(self):
        """
        Triggered when (pre|post)_test_source changes or on the first time the
        .clean() method is called.
        """
        # Check if new source is valid
        for attr in ['pre_tests_source', 'post_tests_source']:
            try:
                source = getattr(self, attr)
                if source:
                    iospec = parse_iospec(source)
                else:
                    iospec = None
                setattr(self, attr[:-7], iospec)
            except Exception as ex:
                self.clear_tests()
                raise ValidationError(
                    {attr: _('invalid iospec syntax: %s' % ex)}
                )

        # Computes temporary expansions for all sources. A second step may be
        # required in which we use the reference source in answer key to further
        # expand iospec data structures
        iospec = self.pre_tests.copy()
        iospec.expand_inputs(self.number_of_pre_expansions)
        self.pre_tests_expanded = iospec

        if self.pre_tests_source and self.post_tests_source:
            iospec = ejudge.combine_iospecs(self.pre_tests, self.post_tests)
        elif self.post_tests_source:
            iospec = self.post_tests.copy()
        elif self.pre_tests_source:
            iospec = self.pre_tests.copy()
        else:
            raise ValidationError(_(
                'either pre_tests_source or post_tests_source must be given!'
            ))
        iospec.expand_inputs(self.number_of_post_expansions)
        # assert len(iospec) >= self.number_of_expansions, iospec
        self.post_tests_expanded = iospec

        if self.pre_tests_expanded.is_expanded and \
                self.post_tests_expanded.is_expanded:
            self.pre_tests_expanded_source = self.pre_tests_expanded.source()
            self.post_tests_expanded_source = self.post_tests_expanded.source()
        else:
            self._expand_from_answer_keys()

        # Iospec is valid: save the hash
        self.tests_state_hash = self.current_tests_hash

    def _expand_from_answer_keys(self):
        # If the source requires expansion, we have to check all answer keys
        # to see if one of them defines a valid source and compute the expansion
        # from this source. All languages must produce the same expansion,
        # otherwise it is considered to be an error.
        #
        # If no answer key is available, leave pre_tests_expanded_source blank
        assert self.pre_tests_expanded is not None
        assert self.post_tests_expanded is not None

        pre, post = self.pre_tests_expanded, self.post_tests_expanded
        useful_keys = list(self.answers_with_code())
        if useful_keys:
            ex_pre = pre.copy()
            ex_pre.expand_inputs(self.number_of_pre_expansions)
            ex_post = post.copy()
            ex_post.expand_inputs(self.number_of_post_expansions)
            pre_list = self.answers.expand_all(ex_pre)
            post_list = self.answers.expand_all(ex_post)

            if len(pre_list) == len(post_list) == 1:
                ex_pre = pre_list[0]
                ex_post = post_list[0]
            else:
                def validate(L, field):
                    first, *tail = L
                    for i, elem in enumerate(tail, 1):
                        if first == elem:
                            continue
                        lang1 = useful_keys[0].language
                        lang2 = useful_keys[i].language
                        first.language = lang1
                        elem.language = lang2
                        self.clear_tests()
                        raise validators.inconsistent_testcase_error(first,
                                                                     elem,
                                                                     field)

                validate(pre_list, 'pre_tests_expanded_source')
                validate(post_list, 'post_tests_expanded_source')
                ex_pre, ex_post = pre_list[0], post_list[0]

            # Update values
            self.pre_tests_expanded = ex_pre
            self.pre_tests_expanded_source = ex_pre.source()
            self.post_tests_expanded = ex_post
            self.post_tests_expanded_source = ex_post.source()

    # Data access
    def get_placeholder(self, language=None):
        """
        Return the placeholder text for the given language.
        """
        key = self.answers.get(language or self.language, None)
        if key is None:
            return self.default_placeholder
        return key.placeholder

    def get_reference_source(self, language=None):
        """
        Return the reference source code for the given language or None, if no
        reference is found.
        """
        if language is None:
            language = self.language
        qs = self.answers.all().filter(
            language=get_programming_language(language))
        if qs:
            return qs.get().source
        return ''

    def get_submission_kwargs(self, request, kwargs):
        return dict(language=kwargs['language'], source=kwargs['source'])

    # Access answer key queryset
    def answers_with_code(self):
        """
        Filter only answers that define a program.
        """
        return self.answers.exclude(source='')

    def has_code_changed(self):
        """
        True if some answer source for a valid code has changed.
        """
        keys = self.answers_with_code()
        for key in keys:
            if key.has_changed_source():
                return True
        return False

    # Actions
    def submit(self, user_or_request, language=None, **kwargs):
        if language and self.language:
            if language != self.language:
                args = language, self.language
                raise ValueError('cannot set language: %r != %r' % args)
        if self.language:
            language = self.language
        language = get_programming_language(language)
        return super().submit(user_or_request, language=language, **kwargs)

    def run_post_grading(self, **kwargs):
        """
        Runs post tests for all submissions made to this question.
        """
        for response in self.responses.all():
            response.run_post_grading(tests=self.post_tests_expanded, **kwargs)
        self.closed = True
        self.save()

    def nav_section_for_activity(self, request):
        url = self.get_absolute_url
        section = NavSection(
            __('Question'), url(), title=__('Back to question')
        )
        if self.rules.test(request.user, 'activities.edit_activity'):
            section.add_link(
                __('Edit'), self.get_admin_url(), title=__('Edit question')
            )
        section.add_link(
            __('Submissions'), url('submissions'),
            title=__('View your submissions')
        )
        return section

    # Serving pages and routing
    template = 'questions/coding_io/detail.jinja2'
    template_submissions = 'questions/coding_io/submissions.jinja2'
    template_statistics = 'questions/coding_io/statistics.jinja2'
    template_debug = 'questions/coding_io/debug.jinja2'

    def get_context(self, request, *args, **kwargs):
        context = dict(super().get_context(request, *args, **kwargs),
                       form=True)

        # Select default mode for the ace editor
        if self.language:
            context['default_mode'] = self.language.ace_mode()
        else:
            context['default_mode'] = get_config('CODESCHOOL_DEFAULT_ACE_MODE',
                                                 'python')

        # Enable language selection
        if self.language is None:
            context['select_language'] = True
            context['languages'] = ProgrammingLanguage.supported.all()
        else:
            context['select_language'] = False
        return context

    def serve_ajax_submission(self, client, source=None, language=None,
                              **kwargs):
        """
        Handles student responses via AJAX and a srvice program.
        """
        # User must choose language
        if not language or language == '-----':
            if self.language is None:
                fmt = _('Error'), _('Please select the correct language')
                client.dialog(
                    '<p class="dialog-text"><h2>%s</h2><p>%s</p></p>' % fmt
                )
                return None
            language = self.language
        else:
            language = get_programming_language(language)
        return super().serve_ajax_submission(
            client=client,
            language=language,
            source=source,
        )

    @srvice.route(r'^placeholder/$')
    def route_placeholder(self, request, language):
        """
        Return the placeholder code for some language.
        """
        return self.get_placeholder(language)

    #
    # Actions
    #
    def regrade_post(self):
        """
        Regrade all submissions using the post tests.
        """
        self.responses.regrade_with(self.post_tests_expanded)

    def action_expand_tests(self, client, *args, **kwargs):
        self._expand_tests()
        pre = escape(self.pre_tests_expanded_source)
        post = escape(self.post_tests_expanded_source)
        client.dialog('<h2>Pre-tests</h2><pre>%s</pre>'
                      '<h2>Post-test</h2><pre>%s</pre>' % (pre, post))

    def action_grade_with_post_tests(self, client, *args, **kwargs):
        self.regrade_post()
        client.dialog('<p>Successful operation!</p>')


def compute_test_state_hash(question):
    source_hashes = question.answers.values_list('source_hash', flat=True)
    return md5hash_seq([
        question.pre_tests_source,
        question.post_tests_source,
        '%x%x%f' % (question.num_pre_tests, question.num_post_tests,
                    question.timeout),
        '\n'.join(source_hashes),
    ])


#
# Utility functions
#...
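The post_tests property above is the core of the API: reading it lazily parses post_tests_source and merges it with pre_tests via ejudge.combine_iospec(), assigning to it drops cases already covered by pre_tests before writing the source back, and deleting it clears the cache. The following sketch shows how those three paths might be exercised; question (a saved CodingIoQuestion instance) and extra_source (a valid iospec document) are hypothetical placeholders, not names taken from the snippet above.

# Minimal usage sketch (not part of question.py). 'question' is assumed to be
# a saved CodingIoQuestion instance and 'extra_source' a valid iospec string.

# Getter: parses post_tests_source lazily and combines it with pre_tests.
merged = question.post_tests
print(merged.source())

# Setter: cases already present in pre_tests are filtered out before the
# value is combined and written back to post_tests_source.
question.post_tests = parse_iospec(extra_source)
question.save()

# Deleter: discards the cached value so the next access re-parses the source.
del question.post_tests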

test_integration.py

Source: test_integration.py (GitHub)

import pytest
from codeschool.lms.activities.models import Feedback
from codeschool.questions.coding_io.models.question import expand_tests
from codeschool.questions.coding_io.tests.test_models import example, source

pytestmark = pytest.mark.integration


def test_submission_correct_response_hello(db, user, request_with_user):
    question = example('simple')
    src = source('hello.py')
    submission = question.submit(request_with_user,
                                 source=src,
                                 language='python')
    feedback = submission.auto_feedback()
    assert feedback.given_grade_pc == 100
    assert feedback.final_grade_pc == 100
    assert feedback.is_correct is True


def test_submission_correct_response_fibonacci(db, user, request_with_user):
    question = example('fibonacci')
    src = source('fibonacci.py')
    submission = question.submit(request_with_user,
                                 source=src,
                                 language='python')
    feedback = submission.auto_feedback()
    assert feedback.is_correct is True


def test_submission_with_presentation_error_hello(db, user, request_with_user):
    question = example('simple')
    submission = question.submit(request_with_user,
                                 source=source('hello-presentation.py'),
                                 language='python')
    feedback = submission.auto_feedback()
    assert feedback.is_correct is False
    assert feedback.feedback_status == 'presentation-error'
    assert feedback.is_presentation_error
    assert feedback.given_grade_pc < 100


def test_submission_with_wrong_answer_hello(db, user, request_with_user):
    question = example('simple')
    submission = question.submit(request_with_user,
                                 source=source('hello-wrong.py'),
                                 language='python')
    feedback = submission.auto_feedback()
    assert feedback.is_correct is False
    assert feedback.feedback_status == 'wrong-answer'
    assert feedback.is_wrong_answer
    assert feedback.final_grade_pc == 0


def test_submission_with_runtime_error_hello(db, user, request_with_user):
    question = example('simple')
    submission = question.submit(request_with_user,
                                 source=source('hello-runtime.py'),
                                 language='python')
    feedback = submission.auto_feedback()
    assert feedback.is_correct is False
    assert feedback.feedback_status == 'runtime-error'
    assert feedback.is_runtime_error
    assert feedback.final_grade_pc == 0


def test_submission_with_invalid_syntax_hello(db, user, request_with_user):
    question = example('simple')
    submission = question.submit(request_with_user,
                                 source=source('hello-build.py'),
                                 language='python')
    feedback = submission.auto_feedback()
    assert feedback.is_correct is False
    assert feedback.feedback_status == 'build-error'
    assert feedback.is_build_error
    assert feedback.final_grade_pc == 0


def test_submission_feedback_keeps_the_correct_code_hello(db, user,
                                                          request_with_user):
    question = example('simple')
    submission = question.submit(request_with_user,
                                 source=source('hello-build.py'),
                                 language='python')
    feedback = submission.auto_feedback()
    db_fb = Feedback.objects.get(id=submission.id)
    assert feedback.feedback_status == db_fb.feedback.status


@pytest.mark.skip('ejudge not catching timeout errors?')
def test_stop_execution_of_submission_after_timeout_hello(db, user,
                                                          request_with_user):
    question = example('simple')
    question.timeout = 0.35
    submission = question.submit(request_with_user,
                                 source=source('hello-timeout.py'),
                                 language='python')
    feedback = submission.auto_feedback()
    assert feedback.is_correct is False
    assert feedback.feedback_status == 'timeout-error'
    assert feedback.is_timeout_error
    assert feedback.final_grade_pc == 0


# Test expansions
def test_tests_expansion_fibonacci(db):
    question = example('fibonacci')
    tests = expand_tests(question, question.pre_tests)
    assert tests.is_simple
    assert tests.is_standard_test_case


def test_expand_iospec_source_with_commands(db):
    src = 'print(input("x: "))'
    question = example('hello-commands')
    question.full_clean_all()


def test_hello_question_creation(db):
    question = example('hello')
    question.full_clean_all()
    assert question.get_reference_source(
        'python') == source('hello.py').strip()
    assert question.title == 'Hello Person'
    assert question.pre_tests is not None
    assert question.pre_tests.is_expanded is True
    assert question.post_tests is not None
    assert question.post_tests.is_expanded is False
    assert question.answers.count() == 1


def test_fibonacci_question_creation(db):
    question = example('fibonacci')
    question.full_clean_all()
    assert question.pre_tests is not None
    assert question.pre_tests.is_expanded is False
    assert question.post_tests is not None
    assert question.post_tests.is_expanded is False
...
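The integration tests above only assert on the pre_tests and post_tests flags after creation. A similar test for the post-test expansion path could look like the sketch below; it reuses the example() fixture and the get_expand_post_tests() helper from question.py, but this exact test is not part of test_integration.py and is shown only as an assumption-laden illustration.

def test_post_tests_expansion_fibonacci(db):
    # Hypothetical sketch: exercises the post-test expansion path.
    question = example('fibonacci')
    question.full_clean_all()
    expanded = question.get_expand_post_tests()
    assert expanded is not None
    assert question.post_tests is not None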

definitions.py

Source: definitions.py (GitHub)

from tests.common import *  # NOTE: Not a good practice in general, but it is not going to matter in this case

POST_TESTS = {
    "1":
        {
            "description": "create new account with @username1 and @password1",
            "method": "POST",
            "username": "%s" % (username1),
            "password": "%s" % (password1),
            "paths": {"name": "deep", "age": "28"},
            "serverResponse": ["201", "Created"],
            "dependOnTests": "",
            "disableTest": "0"
        },
    "2":
        {
            "description": "create existing account with @username1 and @password1",
            "method": "POST",
            "username": "%s" % (username1),
            "password": "%s" % (password1),
            "paths": {"name": "deep", "age": "28"},
            "serverResponse": ["409", "Conflict"],
            "dependOnTests": "",
            "disableTest": "0"
        },
    "3":
        {
            "description": "add new attributes in existing account with @username1 and @password1",
            "method": "POST",
            "username": "%s" % (username1),
            "password": "",
            "paths": {"office": "london", "phone-number": "999-999-0000"},
            "serverResponse": ["200", "OK"],
            "dependOnTests": "POST_TESTS:1",
            "disableTest": "0"
        },
    "4":
        {
            "description": "authenticate account with @username1 and @password1",
            "method": "POST",
            "username": "%s" % (username1),
            "password": "%s" % (password1),
            "paths": {},
            "serverResponse": ["200", "OK"],
            "dependOnTests": "POST_TESTS:1",
            "disableTest": "0"
        }
}

GET_TESTS = {
    "1":
        {
            "description": "get all attributes from account with @username1",
            "method": "GET",
            "username": "%s" % (username1),
            "password": "",
            "paths": [],
            "serverResponse": ["name", "deep", "age", "28", "OK", "200", "password"],
            "dependOnTests": "POST_TESTS:1",
            "disableTest": "0"
        },
    "2":
        {
            "description": "get a non existing attribute from account with @username1",
            "method": "GET",
            "username": "%s" % (username1),
            "password": "",
            "paths": ["fav-movie-name"],
            "serverResponse": ["200", "OK", "fav-movie-name", "Not Exist/Error"],
            "dependOnTests": "POST_TESTS:1",
            "disableTest": "0"
        },
    "3":
        {
            "description": "get name attribute from account with @username1 after deleting name",
            "method": "GET",
            "username": "%s" % (username1),
            "password": "",
            "paths": ["name"],
            "serverResponse": ["200", "OK", "Not Exist/Error"],
            "dependOnTests": "POST_TESTS:1 DELETE_TESTS:1",
            "disableTest": "0"
        }
}

PUT_TESTS = {
    "1":
        {
            "description": "update attribute - name - in account @username1",
            "method": "PUT",
            "username": "%s" % (username1),
            "password": "",
            "paths": {"name": "mike"},
            "serverResponse": ["200", "OK", "name"],
            "dependOnTests": "POST_TESTS:1",
            "disableTest": "0"
        },
    "2":
        {
            "description": "update attribute - office - in non-existing account @incorrectUsername",
            "method": "PUT",
            "username": "%s" % (incorrectUsername),
            "password": "",
            "paths": {"office": "china"},
            "serverResponse": ["404", "Not Found"],
            "dependOnTests": "",
            "disableTest": "0"
        }
}

DELETE_TESTS = {
    "1":
        {
            "description": "delete - name - attribute from account with @username1",
            "method": "DELETE",
            "username": "%s" % (username1),
            "password": "",
            "paths": ["name"],
            "serverResponse": ["200", "OK"],
            "dependOnTests": "POST_TESTS:1",
            "disableTest": "0"
        },
    "2":
        {
            "description": "delete - age - attribute from non-existing account with @incorrectUsername",
            "method": "DELETE",
            "username": "%s" % (incorrectUsername),
            "password": "",
            "paths": ["age"],
            "serverResponse": ["404", "Not Found"],
            "dependOnTests": "",
            "disableTest": "0"
        }
...
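definitions.py only declares the test tables; the runner that consumes them is not shown in this excerpt. The sketch below illustrates one way such a runner might iterate POST_TESTS using the requests library against a local server; run_post_tests(), BASE_URL and the exact request shape are assumptions for illustration, not part of the original suite.

import requests

BASE_URL = "http://localhost:8080/accounts"  # hypothetical endpoint


def run_post_tests(tests=POST_TESTS):
    # Walk the table in key order and skip disabled entries.
    for test_id, spec in sorted(tests.items(), key=lambda kv: int(kv[0])):
        if spec["disableTest"] == "1":
            continue
        response = requests.post(
            "%s/%s" % (BASE_URL, spec["username"]),
            auth=(spec["username"], spec["password"]),
            json=spec["paths"],
        )
        expected_status = int(spec["serverResponse"][0])
        assert response.status_code == expected_status, spec["description"]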
