How to use the put_back method in autotest

Best Python code snippet using autotest_python
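
The put_back pattern gives a hand-written parser one token of lookahead: read a token, inspect it, and return it to the stream if it is not the one the grammar expects, so the next read sees it again. A minimal sketch of the idea (the Tokenizer class and its token dicts here are illustrative, not an actual autotest API):

    class Tokenizer:
        def __init__(self, tokens):
            self._tokens = iter(tokens)
            self._pending = []  # tokens that were put back, served LIFO

        def get_next_token(self):
            if self._pending:
                return self._pending.pop()
            return next(self._tokens, {"style": "EOF", "text": ""})

        def put_back(self, tok):
            # The very next get_next_token() will return this token again.
            self._pending.append(tok)

    tokenizer = Tokenizer([{"style": "op", "text": ","}, {"style": "id", "text": "a"}])
    tok = tokenizer.get_next_token()
    if tok["text"] != ",":
        tokenizer.put_back(tok)  # not ours to consume; restore the stream

Both snippets below rely on exactly this contract.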

css_linter.py

Source: css_linter.py (GitHub)


...
        self._less_mixins = {}  # => name => parameter list
        self._parse()
        return self._results

    def _parser_putback_recover(self, tok):
        self._tokenizer.put_back(tok)
        raise SyntaxError()

    def _parse(self):
        self._at_start = True
        self._charset = "UTF-8"
        self._parse_top_level()

    def _parse_ruleset(self):
        self._parse_selector()
        while True:
            tok = self._tokenizer.get_next_token()
            if tok['style'] == EOF_STYLE:
                self._add_result("expecting a block of declarations", tok)
                return
            self._check_tag_tok(tok, 1)
            if not self._classifier.is_operator(tok, ","):
                break
            self._parse_selector()
        self._parse_declarations(tok)  # tok is the non-comma, should be "{"

    def _parse_selector(self, resolve_selector_property_ambiguity=False):
        """
        selector : simple_selector [ combinator selector
                   | S [ combinator? selector ]?
                   ]? ;
        simple_selector : element_name [HASH | class | attrib | pseudo ]*
                        | [HASH | class | attrib | pseudo ]+;
        Instead, here we'll loop through selectors, allowing
        combinators if we find them.
        Note that error-recovery takes place here, not at the top-level.
        """
        require_simple_selector = True
        while True:
            res = self._parse_simple_selector(require_simple_selector,
                                              resolve_selector_property_ambiguity)
            if not res:
                break
            if resolve_selector_property_ambiguity and not require_simple_selector:
                # More than one simple selector in a row means it's not a
                # property
                self._saw_selector = True
            require_simple_selector = False
            tok = self._tokenizer.get_next_token()
            if tok['style'] == EOF_STYLE:
                # bug 94621 -- If we're on EOF while processing a selector,
                # give up on this loop
                break
            self._check_tag_tok(tok, 2)
            if not self._classifier.is_operator_choose(tok, ("+", ">", "~")):
                self._tokenizer.put_back(tok)
            else:
                require_simple_selector = True

    def _pseudo_element_check(self, tok, saw_pseudo_element):
        if saw_pseudo_element:
            self._add_result_tok_parts(
                "Pseudo elements must appear at the end of a selector chain",
                tok['start_line'], tok['start_column'],
                tok['end_line'], tok['end_column'],
                "", 1)

    def _reparse_structural_tokens(self, tok):
        # Just pull in all the text up to ')', and build a text part,
        # keeping spaces, keeping whitespace
        # Look for this:
        # ['-'|'+']? INTEGER? {N} [S* ['-'|'+'] S* INTEGER]?
        # This routine repackages the tokens, because strings like "-3n"
        # appear as an unknown identifier, but should be three separate
        # tokens. The last token will be the end-token, normally a ")" op.
        ret_toks = []
        num_ptn = re.compile(r'(\d+)(.*)')
        while True:
            if (tok['style'] == EOF_STYLE or
                    (self._classifier.is_operator(tok) and tok['text'] in ");}{}")):
                ret_toks.append(tok)
                self._tokenizer.put_back(tok)
                return ret_toks
            tokText = tok['text']
            while True:
                if tokText.startswith("-") or tokText.startswith("+"):
                    newTok = tok.copy()
                    newTok['text'] = '-'
                    tok['text'] = tokText[0]
                    tok['end_column'] = tok['start_column'] + 1
                    tok['style'] = ScintillaConstants.SCE_CSS_OPERATOR
                    ret_toks.append(tok)
                    newTok['start_column'] = tok['end_column']
                    tok = newTok
                    tokText = tokText[1:]
                else:
                    m = num_ptn.match(tokText)
                    if m:
                        newTok = tok.copy()
                        newTok['text'] = '-'
                        tok['text'] = m.group(1)
                        tok['end_column'] = tok['start_column'] + len(tok['text'])
                        tok['style'] = ScintillaConstants.SCE_CSS_NUMBER
                        ret_toks.append(tok)
                        newTok['start_column'] = tok['end_column']
                        tok = newTok
                        tokText = m.group(2)
                    elif tokText[0].lower() == "n":
                        newTok = tok.copy()
                        newTok['text'] = '-'
                        tok['text'] = tokText[0]
                        tok['end_column'] = tok['start_column'] + 1
                        tok['style'] = ScintillaConstants.SCE_CSS_VALUE
                        ret_toks.append(tok)
                        newTok['start_column'] = tok['end_column']
                        tok = newTok
                        tokText = tokText[1:]
                    else:
                        # Just append it and deal with it later
                        ret_toks.append(tok)
                        tok = self._tokenizer.get_next_token()
                        break  # break inner loop, go back to outer loop
                if not tokText:
                    # Start working on another token.
                    tok = self._tokenizer.get_next_token()
                    break  # break inner loop, go back to outer loop
            # end while tokText
        # end while looping over tokens waiting for a ")"

    def _parse_structural_pseudo_class_arg(self):
        """ Weird grammar:
        nth : S* nthPart S* ;
        nthPart : ['-'|'+']? INTEGER? {N} [S* ['-'|'+'] S* INTEGER]?
                | ['-'|'+']? INTEGER
                | {ODD}
                | {EVEN}
                ;
        Note that + will be colored as an op, but - will be colored as
        an unknown identifier
        """
        tok = self._tokenizer.get_next_token()
        if self._classifier.is_tag(tok) and tok['text'].lower() in ("odd", "even"):
            return
        tokens = self._reparse_structural_tokens(tok)
        end_tok = tokens.pop()  # This was also put back
        if not tokens:
            self._add_result("expecting a value", end_tok)
            return
        tok = tokens.pop(0)
        if self._classifier.is_operator(tok) and tok['text'] in ('-', '+'):
            tokSign = tok
            if not tokens:
                self._add_result("expecting a number or N", end_tok)
                return
            tok = tokens.pop(0)
            if tokSign['end_line'] != tok['start_line'] or tokSign['end_column'] != tok['start_column']:
                self._add_result(
                    "expecting no space before %s" % tok['text'], tok)
        met_requirement = False
        tokNum = None
        if self._classifier.is_number(tok):
            if not tokens:
                return  # all ok
            met_requirement = True
            tokNum = tok
            tok = tokens.pop(0)
        if self._classifier.is_value(tok) and tok['text'].lower() == 'n':
            if not tokens:
                return  # all ok
            if tokNum and (tokNum['end_line'] != tok['start_line']
                           or tokNum['end_column'] != tok['start_column']):
                self._add_result(
                    "expecting no space before %s" % tok['text'], tok)
            tok = tokens.pop(0)
        elif not met_requirement:
            self._add_result("expecting a number or N", tok)
            # Complain and give up
            return
        else:
            # If we didn't see an 'n', we need to leave
            self._add_result("expecting ')'", tok)
            return
        # Look for a second 'sign'
        require_number = False
        if self._classifier.is_operator(tok) and tok['text'] in ('-', '+'):
            if not tokens:
                self._add_result("expecting a number", end_tok)
                return
            tok = tokens.pop(0)
        if self._classifier.is_number(tok):
            if not tokens:
                return
        else:
            self._add_result("expecting a number", tok)
        if tokens:
            self._add_result("expecting ')'", tokens[0])

    def _parse_simple_selector(self, match_required, resolve_selector_property_ambiguity):
        saw_pseudo_element = False
        current_name = None
        num_selected_names = 0
        could_have_mixin = False
        while True:
            tok = self._tokenizer.get_next_token()
            if tok['style'] == EOF_STYLE:
                break
            self._check_tag_tok(tok, 3)
            log.debug("_parse_simple_selector: got tok %s", tok)
            if self._classifier.is_tag(tok):
                # Namespace check
                tok = self._tokenizer.get_next_token()
                if self._classifier.is_operator(tok, "|"):
                    tok = self._tokenizer.get_next_token()
                    if not self._classifier.is_tag(tok):
                        self._add_result("expecting an element name", tok)
                        self._tokenizer.put_back(tok)
                else:
                    self._tokenizer.put_back(tok)
                num_selected_names += 1
                self._pseudo_element_check(tok, saw_pseudo_element)
                current_name = tok['text']
                could_have_mixin = False
            elif self._classifier.is_identifier(tok):
                num_selected_names += 1
                self._pseudo_element_check(tok, saw_pseudo_element)
                if not self._supportsNestedDeclaration:
                    self._add_result(
                        "expecting a tag name, got unrecognized name %s (style %d)" % (
                            tok['text'], tok['style']),
                        tok, status=0)
                current_name = tok['text']
                could_have_mixin = False
            elif self._classifier.is_operator(tok):
                if tok['text'] == ":":
                    if resolve_selector_property_ambiguity and not self._saw_selector:
                        # This is the crucial point
                        # Are we looking at
                        # font: ....
                        # or
                        # a:hover ...
                        # We take the easy way out and resolve this by looking at coordinates
                        #
                        # We also assume that anyone using Less or SCSS is more interested in
                        # readability than conciseness, so we aren't dealing
                        # with minified CSS.
                        if self._tokenizer.next_token_is_whitespace(tok):
                            self._tokenizer.put_back(tok)
                            return False
                    prev_tok = tok
                    tok = self._tokenizer.get_next_token()
                    if self._classifier.is_number(tok):
                        self._tokenizer.put_back(tok)
                        self._tokenizer.put_back(prev_tok)
                        return False
                    if not self._check_special_identifier(prev_tok, tok):
                        return False
                    num_selected_names += 1
                    current_name = tok['text']
                    if (tok['text'] in self._structural_pseudo_classes_with_args
                            or tok['text'] in self._structural_pseudo_classes_other):  # "not", "-moz-any"
                        prev_tok = tok
                        tok = self._tokenizer.get_next_token()
                        if self._classifier.is_operator(tok, "("):
                            if prev_tok['text'] in self._structural_pseudo_classes_with_args:
                                self._parse_structural_pseudo_class_arg()
                            else:
                                # It's the CSS3 "not" or -moz-any selector
                                while True:
                                    self._parse_simple_selector(
                                        True, resolve_selector_property_ambiguity=False)
                                    tok = self._tokenizer.get_next_token()
                                    if not self._classifier.is_operator(tok) or tok['text'] != ",":
                                        self._parser_putback_recover(tok)
                                        break
                                self._parse_required_operator(")")
                        else:
                            if prev_tok['text'] in self._structural_pseudo_classes_args:
                                self._add_result(
                                    "expecting a parenthesized structural pseudo-class argument")
                            self._tokenizer.put_back(tok)
                    # elif tok['text'] in self._structural_pseudo_classes_no_args:
                    #     pass  # Nothing to do
                    could_have_mixin = False
                elif tok['text'] in ("#", ".", "::",):
                    prev_tok = tok
                    could_have_mixin = (self.language == "Less"
                                        and prev_tok['text'] == '.'
                                        and num_selected_names == 0)
                    tok = self._tokenizer.get_next_token()
                    if could_have_mixin and self._classifier.is_mixin(tok):
                        pass
                        # keep going...
                    elif not self._check_special_identifier(prev_tok, tok):
                        return False
                    num_selected_names += 1
                    self._pseudo_element_check(tok, saw_pseudo_element)
                    current_name = tok['text']
                    if prev_tok['text'] == "::":
                        saw_pseudo_element = True
                elif tok['text'] == '[':
                    if resolve_selector_property_ambiguity:
                        # More than one simple selector in a row means it's not
                        # a property
                        self._saw_selector = True
                    self._parse_attribute()
                    num_selected_names += 1
                    could_have_mixin = False
                elif tok['text'] == '{':
                    if num_selected_names == 0 and match_required:
                        self._add_result("expecting a selector, got '{'", tok)
                    # Short-circuit the calling loop.
                    self._tokenizer.put_back(tok)
                    return False
                elif tok['text'] == '}':
                    if could_have_mixin and current_name in self._less_mixins:
                        self._inserted_mixin = True
                        self._tokenizer.put_back(tok)
                        return False
                    # assume we recovered to the end of a "}"
                    could_have_mixin = False
                    num_selected_names = 0
                    continue
                elif tok['text'] == ';' and could_have_mixin and num_selected_names == 1:
                    self._inserted_mixin = True
                    self._tokenizer.put_back(tok)
                    return
                elif tok['text'] == "&" and self.language == "SCSS":
                    self._saw_selector = True
                    num_selected_names += 1
                elif tok['text'] == "&" and self.language == "Less":
                    tok = self._tokenizer.get_next_token()
                    if (self._classifier.is_operator_choose(tok, ("#", ".", ":", "::", ","))
                            or self._classifier.is_special_identifier(tok)):
                        # Parse the qualifier next time around
                        self._saw_selector = True
                        num_selected_names += 1
                    self._tokenizer.put_back(tok)
                else:
                    break
            else:
                break
        if num_selected_names == 0:
            if match_required:
                self._add_result("expecting a selector, got %s" % (
                    tok['text'],), tok)
                tok = self._recover(allowEOF=False, opTokens=("{", "}"))
            # We got a { or }, so short-circuit the calling loop and
            # go parse the declaration
            self._tokenizer.put_back(tok)
            return False
        # Allow either the Mozilla ( id [, id]* ) property syntax or a Less mixin declaration/insertion
        # Note that we have the token that caused us to leave the above loop
        if not self._classifier.is_operator(tok, "("):
            if (could_have_mixin
                    and current_name in self._less_mixins
                    and self._classifier.is_operator(tok, ";")):
                self._inserted_mixin = True
            self._tokenizer.put_back(tok)
            return True
        do_recover = False
        if could_have_mixin:
            if current_name in self._less_mixins:
                self._parse_mixin_invocation()
                self._inserted_mixin = True
            else:
                self._parse_mixin_declaration(current_name)
            return
        tok = self._tokenizer.get_next_token()
        if not self._classifier.is_tag(tok):
            self._add_result("expecting a property name", tok)
            self._tokenizer.put_back(tok)
            do_recover = True
        else:
            self._parse_identifier_list(self._classifier.is_tag, ",")
            tok = self._tokenizer.get_next_token()
            if not self._classifier.is_operator(tok, ")"):
                self._add_result("expecting ')'", tok)
                do_recover = True
        if do_recover:
            tok = self._recover(allowEOF=False, opTokens=("{", "}"))
            self._tokenizer.put_back(tok)
            return False
        return True

    def _check_special_identifier(self, prev_tok, tok):
        if (self._classifier.is_special_identifier(tok)
                or (self._supportsNestedDeclaration
                    and (self._classifier.is_unknown_identifier(tok)
                         or tok['style'] == ScintillaConstants.SCE_CSS_VALUE))):
            return True
        self._add_result("expecting an identifier after %s, got %s" % (
            prev_tok['text'], tok['text']), tok)
        # Give up looking at selectors
        self._tokenizer.put_back(tok)
        return False

    def _parse_attribute(self):
        tok = self._tokenizer.get_next_token()
        if not (self._classifier.is_attribute(tok)
                or self._classifier.is_identifier(tok)
                # tags can happen after *[foo] due to confused lexing
                or self._classifier.is_tag(tok)):
            self._add_result("expecting an identifier", tok)
        else:
            tok = self._tokenizer.get_next_token()
        substring_toks = ("*", "$", "^")
        attr_toks = ("]", "=", "~=", "|=")
        if (self._classifier.is_operator_choose(tok, substring_toks)
                or self._is_scss_variable(tok)):
            tok2 = self._tokenizer.get_next_token()
            if not self._classifier.is_operator_choose(tok2, "="):
                self._add_result(
                    "expecting '=' after substring operator '%s'" % tok['text'], tok2)
                tok = tok2
            else:
                tok = self._tokenizer.get_next_token()
        elif tok['text'] == ']':
            return
        elif self._classifier.is_operator_choose(tok, attr_toks):
            tok = self._tokenizer.get_next_token()
        else:
            self._add_result("expecting one of %s" % (
                ', '.join(attr_toks + substring_toks),), tok)
            self._parser_putback_recover(tok)
        # At this point we've seen a '=' or other op, and should have a
        # value token in hand
        if self._classifier.is_stringeol(tok):
            self._add_result("missing string close-quote", tok)
        elif not (self._classifier.is_string(tok)
                  or self._classifier.is_identifier(tok)
                  or self._classifier.is_tag(tok)):
            self._add_result("expecting an identifier or string", tok)
            self._tokenizer.put_back(tok)
            return
        tok = self._tokenizer.get_next_token()
        if not self._classifier.is_operator(tok, ']'):
            self._add_result("expecting a ']'", tok)

    def _parse_assignment(self):
        """
        we saw $var or @var at top-level, expect : expression ;
        """
        self._parse_required_operator(":")
        self._parse_expression()
        self._parse_required_operator(";")

    def _parse_directive(self, prev_tok):
        tok = self._tokenizer.get_next_token()
        if not self._classifier.is_directive(tok):
            if (self._classifier.is_tag(tok)
                    and (prev_tok['end_line'] != tok['start_line'] or
                         prev_tok['end_column'] != tok['start_column'])):
                self._add_result_tok_parts(
                    "expecting a directive immediately after @",
                    prev_tok['end_line'],
                    prev_tok['end_column'],
                    tok['start_line'],
                    tok['start_column'], "")
            else:
                self._add_result("expecting an identifier after %s" % (
                    prev_tok['text']), tok)
                self._parser_putback_recover(tok)
        if tok['text'] == "charset":
            return self._parse_charset(tok)
        elif tok['text'].lower() == "import":
            if self._region > self._PARSE_REGION_SAW_IMPORT:
                self._add_result("@import allowed only near start of file",
                                 tok)
            elif self._region < self._PARSE_REGION_SAW_IMPORT:
                self._region = self._PARSE_REGION_SAW_IMPORT
            return self._parse_import()
        self._region = self._PARSE_REGION_SAW_OTHER
        if tok['text'].lower() == "media":
            self._parse_media()
        elif tok['text'].lower() == "page":
            self._parse_page()
        elif tok['text'].lower() == "font-face":
            self._parse_declarations()
        elif tok['text'].lower() == "namespace":
            self._parse_namespace()
        elif tok['text'].lower() == "-moz-document":
            self._parse_moz_document()
        elif self.language == "Less":
            self._parse_assignment()
        elif self.language == "SCSS":
            self._parse_scss_mixin_declaration(tok)
        else:
            self._add_result("expecting a directive after %s" % (
                prev_tok['text']), tok)

    def _parse_scss_mixin_declaration(self, tok):
        if not (self._classifier.is_directive(tok) and tok['text'] == "mixin"):
            self._add_result("expecting a directive or 'mixin'", tok)
            self._parser_putback_recover(tok)
        tok = self._tokenizer.get_next_token()
        if not self._classifier.is_tag(tok):
            self._add_result("expecting a mixin name", tok)
            self._parser_putback_recover(tok)
        tok = self._tokenizer.get_next_token()
        if self._classifier.is_operator(tok, "("):
            self._parse_mixin_invocation()
        else:
            self._tokenizer.put_back(tok)
        self._parse_declarations()

    def _parse_required_operator(self, op, tok=None):
        if tok is None:
            tok = self._tokenizer.get_next_token()
        if not self._classifier.is_operator(tok, op):
            self._add_result("expecting '%s'" % op, tok)
            self._parser_putback_recover(tok)

    def _parse_optional_operator(self, op, alt_op):
        tok = self._tokenizer.get_next_token()
        have_problem = False
        if not self._classifier.is_operator(tok):
            have_problem = True
        elif tok['text'] not in (op, alt_op):
            have_problem = True
        elif tok['text'] == alt_op:
            self._parser_putback_recover(tok)
        if have_problem:
            self._add_result("expecting '%s'" % op, tok)
            self._parser_putback_recover(tok)

    def _parse_charset(self, charset_tok):
        tok = self._tokenizer.get_next_token()
        if self._classifier.is_stringeol(tok):
            self._add_result("missing string close-quote", tok)
        elif not self._classifier.is_string(tok):
            self._add_result(
                "expecting a string after @charset, got %s" % (tok['text']),
                tok)
            self._parser_putback_recover(tok)
        self._parse_required_operator(';')
        if self._region > self._PARSE_REGION_AT_START:
            self._add_result(
                "@charset allowed only at start of file", charset_tok)
        else:
            self._region = self._PARSE_REGION_SAW_CHARSET

    def _parse_import(self):
        if (not self._parse_url()) and (not self._parse_string()):
            tok = self._tokenizer.get_next_token()
            self._add_result("expecting a string or url", tok)
            # Stay here, hope for the best.
        else:
            tok = self._tokenizer.get_next_token()
            if self._classifier.is_value(tok) and self._lex_identifier(tok):
                self._parse_identifier_list(self._classifier.is_value, ",")
                tok = self._tokenizer.get_next_token()
            self._parse_required_operator(";", tok)

    def _parse_media_list(self):
        # See w3.org/TR/css3-mediaqueries/#syntax
        self._parse_media_query()
        while True:
            tok = self._tokenizer.get_next_token()
            if tok['style'] == EOF_STYLE:
                self._add_result("expecting '{'", tok)
                raise SyntaxErrorEOF()
            if not self._classifier.is_operator(tok, ","):
                self._tokenizer.put_back(tok)
                break
            self._parse_media_query()

    def _parse_media_query(self):
        tok = self._tokenizer.get_next_token()
        if self._classifier.is_operator(tok, "("):
            # expression [ AND expression ]*
            self._tokenizer.put_back(tok)
            self._parse_media_expression()
        else:
            # [ONLY | NOT]? media_type [ AND expression ]*
            if not (self._classifier.is_value_or_identifier(tok) and self._lex_identifier(tok)):
                self._add_result(
                    "expecting an identifier or a parenthesized expression", tok)
                tok = self._recover(allowEOF=True, opTokens=("{",))
                if not self._classifier.is_operator(tok, "{"):
                    raise SyntaxErrorEOF()
                self._tokenizer.put_back(tok)
                return
            if tok['text'].lower() in ("only", "not"):
                tok = self._tokenizer.get_next_token()
                if not (self._classifier.is_value_or_identifier(tok) and self._lex_identifier(tok)):
                    self._add_result("an identifier", tok)
                    tok = self._recover(allowEOF=True, opTokens=("{",))
                    if not self._classifier.is_operator(tok, "{"):
                        raise SyntaxError()
                    self._tokenizer.put_back(tok)
                    return
        # And parse [ AND expression ]*
        while True:
            tok = self._tokenizer.get_next_token()
            if self._classifier.is_value(tok) and tok['text'].lower() == "and":
                self._parse_media_expression()
            else:
                self._tokenizer.put_back(tok)
                break

    def _parse_media_expression(self):
        self._parse_required_operator("(")
        tok = self._tokenizer.get_next_token()
        if not (self._classifier.is_value_or_identifier(tok) and self._lex_identifier(tok)):
            self._add_result("expecting an identifier", tok)
        tok = self._tokenizer.get_next_token()
        if self._classifier.is_operator(tok, ":"):
            self._parse_expression()
            tok = self._tokenizer.get_next_token()
        if not self._classifier.is_operator(tok, ")"):
            self._add_result("expecting ':' or ')'", tok)
            self._tokenizer.put_back(tok)

    def _parse_media(self):
        self._parse_media_list()
        self._parse_inner_rulesets()

    def _parse_inner_rulesets(self):
        self._parse_required_operator("{")
        while True:
            tok = self._tokenizer.get_next_token()
            if tok['style'] == EOF_STYLE:
                self._add_result("expecting '}'", tok)
                return
            elif self._classifier.is_operator(tok, "}"):
                break
            self._tokenizer.put_back(tok)
            self._parse_ruleset()

    def _parse_moz_document(self):
        """
        docrule ::= "@-moz-document" S+ url-list "{" S* ruleset* "}"
        url-list ::= url-item ( "," S* url-item )*
        url-item ::= ( "url(" | "url-prefix(" | "domain(" ) URL ")" |
                     "regexp(" string ")" S*
        """
        while True:
            res = self._parse_moz_document_item()
            if not res:
                break
            tok = self._tokenizer.get_next_token()
            if not self._classifier.is_operator(tok):
                # Stay in this loop, maybe we're seeing more moz-doc items
                self._add_result("expecting ',' or '{'", tok)
                self._tokenizer.put_back(tok)
            elif tok['text'] == "{":
                self._tokenizer.put_back(tok)
                break
            elif tok['text'] != ",":
                # Stay in this loop
                self._add_result("expecting ',' or '{'", tok)
                self._tokenizer.put_back(tok)
        self._parse_inner_rulesets()

    def _parse_page(self):
        tok = self._tokenizer.get_next_token()
        if self._classifier.is_operator(tok, ":"):
            tok = self._tokenizer.get_next_token()
            if not (self._classifier.is_special_identifier(tok)):
                self._add_result("expecting an identifier", tok)
                self._parser_putback_recover(tok)
        else:
            tok = None  # refresh in _parse_declarations
        self._parse_declarations(tok)

    def _parse_namespace(self):
        tok = self._tokenizer.get_next_token()
        if (not self._classifier.is_value(tok)) or tok['text'] == "url(":
            self._tokenizer.put_back(tok)
            if (not self._parse_url()) and (not self._parse_string()):
                self._add_result("expecting a string or url", tok)
                tok = self._recover(allowEOF=True, opTokens=(';', "{"))
                if not self._classifier.is_operator(tok, ';'):
                    self._tokenizer.put_back(tok)
                    return
        self._parse_required_operator(";")

    def _parse_mixin_declaration(self, current_name):
        """
        Allow ()
        (@foo[:value]) or
        (@foo1[:value1], @foo2[:value2], ... @fooN[:valueN])
        """
        mixin_vars = []
        self._less_mixins[current_name] = mixin_vars
        tok = self._tokenizer.get_next_token()
        if self._classifier.is_operator(tok, ")"):
            return
        if self._classifier.is_operator(tok, "@"):
            while True:
                if not self._classifier.is_operator(tok, "@"):
                    self._add_result("expecting ')' or a directive", tok)
                    raise SyntaxError()
                tok = self._tokenizer.get_next_token()
                if not self._classifier.is_directive(tok):
                    self._add_result("expecting a variable name", tok)
                    raise SyntaxError()
                mixin_vars.append(tok['text'])
                tok = self._tokenizer.get_next_token()
                if self._classifier.is_operator(tok, ":"):
                    self._parse_expression(consumeCommas=False)
                    tok = self._tokenizer.get_next_token()
                if self._classifier.is_operator(tok, ","):
                    tok = self._tokenizer.get_next_token()
                    # Stay in loop
                elif self._classifier.is_operator(tok, ")"):
                    return
                else:
                    self._add_result("expecting ',' or ')'", tok)
                    raise SyntaxError()
        # Just parse a mixin insertion. This happens when
        # a parameterless mixin was defined, but they look
        # exactly like class selectors.
        self._parse_mixin_invocation()
        self._inserted_mixin = True

    def _parse_mixin_invocation(self):
        """
        comma-separated values, followed by a ")"
        """
        tok = self._tokenizer.get_next_token()
        if self._classifier.is_operator(tok, ")"):
            return
        self._tokenizer.put_back(tok)
        while True:
            tok = self._tokenizer.get_next_token()
            if self._classifier.is_tag(tok):
                pass
            else:
                self._tokenizer.put_back(tok)
                self._parse_expression()
            tok = self._tokenizer.get_next_token()
            if self._classifier.is_operator(tok, ","):
                tok = self._tokenizer.get_next_token()
                # Stay in loop
            elif self._classifier.is_operator(tok, ")"):
                return
            else:
                self._add_result("expecting ',' or ')'", tok)
                raise SyntaxError()

    def _parse_declarations(self, tok=None):
        self._parse_required_operator("{", tok)
        while True:
            tok = self._tokenizer.get_next_token()
            if tok['style'] == EOF_STYLE:
                self._add_result("expecting '}', hit end of file", tok)
                raise SyntaxErrorEOF()
            if self._classifier.is_operator(tok, "}"):
                break
            self._tokenizer.put_back(tok)
            try:
                # TODO: Look ahead for either ';' or '{' to know
                # whether we're entering a nested block or a property
                #
                # The problem with ':' is that they can appear in both selectors
                # as well as after property-names.
                if self._supportsNestedDeclaration:
                    self._parse_declaration_or_nested_block()
                else:
                    self._parse_declaration()
            except SyntaxError:
                tok = self._recover(allowEOF=False, opTokens=(';', "{", "}"))
                t = tok['text']
                if t == ";":
                    pass  # This is good -- continue doing declarations.
                elif t == "}":
                    self._tokenizer.put_back(tok)  # Use this back in loop
                elif t == "{":
                    # Either we're in less/scss, or we missed a selector, fake
                    # it
                    self._parse_declarations(tok)

    def _recover(self, allowEOF, opTokens):
        while True:
            tok = self._tokenizer.get_next_token()
            if tok['style'] == EOF_STYLE:
                if allowEOF:
                    return tok
                raise SyntaxErrorEOF()
            elif self._classifier.is_operator_choose(tok, opTokens):
                return tok

    def _parse_declaration(self):
        """
        Because this is called in a loop, have it return True only if it matches everything
        """
        self._parse_property()
        tok = self._tokenizer.get_next_token()
        if not self._classifier.is_operator(tok, ":"):
            self._add_result("expecting ':'", tok)
            # Swallow it
        self._parse_remaining_declaration()

    def _parse_remaining_declaration(self):
        """ SCSS allows nested declarations:
        li {
          font: {
            family: serif; // => font-family: serif; //etc.
            weight: bold;
            size: 1.2em;
          }
        }
        """
        if self.language == "SCSS":
            tok = self._tokenizer.get_next_token()
            have_brace = self._classifier.is_operator(tok, "{")
            self._tokenizer.put_back(tok)
            if have_brace:
                self._parse_declarations()
                return
        self._parse_expression()
        self._parse_priority()
        self._parse_optional_operator(";", "}")

    def _parse_scss_mixin_use(self):
        # Check for reading in a mixin
        tok = self._tokenizer.get_next_token()
        if not self._classifier.is_operator(tok, "@"):
            self._tokenizer.put_back(tok)
            return
        tok = self._tokenizer.get_next_token()
        if not (self._classifier.is_directive(tok) and tok['text'] == "include"):
            self._add_result("expecting 'include'", tok)
            self._tokenizer.put_back(tok)
            return
        tok = self._tokenizer.get_next_token()
        if not (self._classifier.is_identifier(tok)
                or self._classifier.is_tag(tok)):
            self._add_result("expecting a mixin name", tok)
            self._parser_putback_recover(tok)
        tok = self._tokenizer.get_next_token()
        if self._classifier.is_operator(tok, "("):
            self._parse_mixin_invocation()
            tok = self._tokenizer.get_next_token()
        self._parse_required_operator(";", tok)
        return True

    def _parse_declaration_or_nested_block(self):
        """
        For Less and SCSS, blocks can nest. So parse either a property-name
        or full-blown selector here.
        # Key method for Less/SCSS linting. At this point we can have
        # either a declaration or a nested rule-set.
        """
        # selectors are supersets of property-names, so go with it
        self._saw_selector = False
        self._inserted_mixin = False
        if self.language == "SCSS":
            if self._parse_scss_mixin_use():
                return
        # Try categorizing the next token to remove ambiguity
        tok = self._tokenizer.get_next_token()
        if (self.language == "Less"
                and self._classifier.is_operator_choose(tok, ("+", ">"))):
            self._parse_ruleset()
            return
        self._tokenizer.put_back(tok)
        if (self._classifier.is_identifier(tok)
                and (tok['text'] in raw_word_lists[0] or tok['text'] in raw_word_lists[2])):
            # We have a property
            self._parse_declaration()
            # Don't continue parsing declarations -- the next item could start
            # a nested block.
        else:
            self._parse_selector(resolve_selector_property_ambiguity=True)
            tok = self._tokenizer.get_next_token()
            if self._classifier.is_operator(tok, ","):
                self._parse_ruleset()
                return
            if self._classifier.is_operator(tok, "{"):
                return self._parse_declarations(tok)
            if self._inserted_mixin:
                # Nothing left to do.
                # ; is optional before '}'
                if self._classifier.is_operator(tok, ";"):
                    return
                elif self._classifier.is_operator(tok, "}"):
                    self._tokenizer.put_back(tok)
                    return
            if self._saw_selector:
                self._add_result("expecting '{'", tok)
            if self._classifier.is_operator(tok, ":"):
                self._parse_remaining_declaration()
            else:
                #@NO TEST YET
                self._add_result("expecting ':' or '{'", tok)
                self._parser_putback_recover(tok)

    def _parse_property(self):
        tok = self._tokenizer.get_next_token()
        if self._classifier.is_operator(tok, "*"):
            prev_tok = tok
            tok = self._tokenizer.get_next_token()
        else:
            prev_tok = None
        if not (self._classifier.is_identifier(tok)
                or self._classifier.is_tag(tok)):
            #@NO TEST YET
            self._add_result("expecting a property name", tok)
            self._parser_putback_recover(tok)
        if prev_tok is not None:
            if prev_tok['end_column'] == tok['start_column']:
                self._add_result_tok_parts("Use of non-standard property-name '%s%s'" %
                                           (prev_tok['text'], tok['text']),
                                           prev_tok['start_line'], prev_tok['start_column'],
                                           tok['end_line'], tok['end_column'], "",
                                           status=0)
            else:
                # Put the token back, trigger an error-message later
                self._tokenizer.put_back(tok)

    def _parse_expression(self, consumeCommas=True):
        if self._parse_term(required=True):
            while True:
                self._parse_operator(consumeCommas)
                if not self._parse_term(required=False):
                    break

    def _parse_operator(self, consumeCommas=True):
        tok = self._tokenizer.get_next_token()
        if not self._classifier.is_operator(tok):
            self._tokenizer.put_back(tok)
        elif tok['text'] == "/" or (tok['text'] == "," and consumeCommas):
            # use up
            pass
        elif self.language == "Less" and tok['text'] in ("~", "*", "^", "-", "+", "/", "|", "&", "||", "&&",):
            # use up
            pass
        else:
            self._tokenizer.put_back(tok)

    def _parse_unary_operator(self):
        tok = self._tokenizer.get_next_token()
        if not self._classifier.is_operator(tok):
            self._tokenizer.put_back(tok)
            return False
        elif not tok['text'] in ("+", "-"):
            self._tokenizer.put_back(tok)
            return False
        else:
            return True

    def _parse_parenthesized_expression(self):
        tok = self._tokenizer.get_next_token()
        if not self._classifier.is_operator(tok, "("):
            self._tokenizer.put_back(tok)
            return False
        self._parse_expression()
        self._parse_required_operator(")")
        return True

    def _parse_escaped_string(self):
        """
        Accept any of
        ~" ... "
        ~` ... `
        ` ... `
        """
        tok = self._tokenizer.get_next_token()
        if not self._classifier.is_operator_choose(tok, ("~", '`')):
            self._tokenizer.put_back(tok)
            return False
        if tok['text'] == '~':
            prev_tok = tok
            tok = self._tokenizer.get_next_token()
            if not self._classifier.is_operator(tok) or tok['text'] not in ('"', '`'):
                self._tokenizer.put_back(prev_tok)
                self._tokenizer.put_back(tok)
                return False
        target = tok['text']
        while True:
            tok = self._tokenizer.get_next_token()
            if tok['style'] == EOF_STYLE:
                self._add_result(
                    "expecting '%s', hit end of file" % (target,), tok)
                raise SyntaxErrorEOF()
            elif self._classifier.is_operator(tok, target):
                return True

    def _parse_term(self, required=False):
        exp_num = self._parse_unary_operator()
        have_num = self._parse_number(exp_num)
        if have_num:
            return True
        elif exp_num:
            return False
        if self._parse_string():
            return True
        elif self._parse_url():
            return True
        elif self._parse_hex_color():
            return True
        elif self._parse_function_call_or_term_identifier():
            return True
        elif self._parse_variable_reference():
            return True
        elif self.language == "Less":
            if self._parse_parenthesized_expression():
                return True
            elif self._parse_escaped_string():
                return True
        if required:
            tok = self._tokenizer.get_next_token()
            self._check_tag_tok(tok, 8)
            self._add_result("expecting a value", tok)
            self._tokenizer.put_back(tok)
        return False

    _simple_number_re = re.compile(r'\d+')

    def _parse_number(self, exp_num):
        tok = self._tokenizer.get_next_token()
        if self._classifier.is_number(tok):
            # Bug 94652: Look for unrecognized units
            nextTok = self._tokenizer.get_next_token()
            if (nextTok['style'] == ScintillaConstants.SCE_CSS_VALUE
                    and nextTok['start_line'] == tok['end_line']
                    and nextTok['start_column'] == tok['end_column']):
                self._add_result("got an unsupported or unrecognized numeric unit: '%s'" %
                                 nextTok['text'], nextTok)
            else:
                self._tokenizer.put_back(nextTok)
            return True
        elif (tok['style'] == ScintillaConstants.SCE_CSS_UNKNOWN_PSEUDOCLASS
              and self._simple_number_re.match(tok['text'])):
            return True
        elif exp_num:
            self._add_result("expecting a number", tok)
            self._parser_putback_recover(tok)
        else:
            self._tokenizer.put_back(tok)
        return False

    def _parse_string(self):
        tok = self._tokenizer.get_next_token()
        if self._classifier.is_stringeol(tok):
            self._add_result("missing string close-quote", tok)
        elif not self._classifier.is_string(tok):
            self._tokenizer.put_back(tok)
            return False
        return True

    def _parse_term_identifier(self):
        required = False
        prev_tok = None
        while True:
            tok = self._tokenizer.get_next_token()
            if not (self._classifier.is_value(tok) and self._lex_identifier(tok)):
                if required:
                    self._add_result("expecting an identifier", tok)
                    # Swallow the ':' or '.' that got us here.
                    return False
                else:
                    self._tokenizer.put_back(tok)
                    return prev_tok is not None
            prev_tok = tok
            tok = self._tokenizer.get_next_token()
            if self._classifier.is_operator(tok, "="):
                self._parse_expression()
                return prev_tok  # tok = self._tokenizer.get_next_token()
            if not (self._classifier.is_operator(tok)
                    and tok['text'] in (":", ".")):  # Microsoft additions
                self._tokenizer.put_back(tok)
                return prev_tok
            op_tok = tok
            required = True

    def _parse_identifier(self):
        tok = self._tokenizer.get_next_token()
        if not (self._classifier.is_value(tok) and self._lex_identifier(tok)):
            self._tokenizer.put_back(tok)
            return False
        else:
            return True

    _url_re = re.compile(r'url\((.*)\)\Z')

    def _parse_url(self):
        tok = self._tokenizer.get_next_token()
        if self._classifier.is_value(tok):
            if self._url_re.match(tok['text']):
                return True
            if tok['text'] == "url(":
                # Verify that the actual URL is a string
                if not self._parse_string():
                    tok = self._tokenizer.get_next_token()
                    self._add_result("expecting a quoted URL", tok)
                    self._parser_putback_recover(tok)
                tok = self._tokenizer.get_next_token()
                if not (self._classifier.is_operator(tok, ")")
                        or (self._classifier.is_value(tok) and tok['text'] == ')')):
                    self._add_result("expecting ')'", tok)
                    self._parser_putback_recover(tok)
                else:
                    return True
        self._tokenizer.put_back(tok)
        return False

    _url_item_re = re.compile(r'(?:url|url-prefix|domain)\((.*)\)\Z')

    def _parse_url_item(self):
        tok = self._tokenizer.get_next_token()
        if self._classifier.is_value(tok):
            if self._url_re.match(tok['text']):
                return True
            if tok['text'] == "url(":
                # Verify that the actual URL is a string
                if not self._parse_string():
                    tok = self._tokenizer.get_next_token()
                    self._add_result("expecting a quoted URL", tok)
                    self._parser_putback_recover(tok)
                tok = self._tokenizer.get_next_token()
                if not (self._classifier.is_operator(tok, ")")
                        or (self._classifier.is_value(tok) and tok['text'] == ')')):
                    self._add_result("expecting ')'", tok)
                    self._parser_putback_recover(tok)
                else:
                    return True
        self._tokenizer.put_back(tok)
        return False

    moz_document_item_types = ("url", "url-prefix", "domain", "regexp")
    moz_document_item_types_with_paren = tuple(
        [x + "(" for x in moz_document_item_types])

    def _parse_moz_document_item(self):
        tok = self._tokenizer.get_next_token()
        if not tok['text'].startswith(self.moz_document_item_types_with_paren):
            self._add_result("expecting a -moz-document url-item", tok)
            self._parser_putback_recover(tok)
        if tok['text'] in self.moz_document_item_types_with_paren:
            self._parse_string()
            self._parse_required_operator(")")
        elif tok['text'].startswith("regexp("):
            self._add_result(
                "the regexp argument must be a quoted string", tok)

    _hex_color_re = re.compile(r'#(?:[\da-fA-F]{3}){1,2}\Z')

    def _parse_hex_color(self):
        tok = self._tokenizer.get_next_token()
        if (self._classifier.is_value(tok)
                and self._hex_color_re.match(tok['text'])):
            return True
        elif self.language != "CSS" and self._classifier.is_operator(tok, "#"):
            new_tok = self._tokenizer.get_next_token()
            if self._hex_color_re.match("#" + new_tok['text']):
                return True
            self._tokenizer.put_back(tok)
            self._tokenizer.put_back(new_tok)
        else:
            self._tokenizer.put_back(tok)
        return False

    def _parse_function_call_or_term_identifier(self):
        res = self._parse_term_identifier()
        if not res:
            return False
        tok = self._tokenizer.get_next_token()
        if not self._classifier.is_operator(tok, "("):
            self._tokenizer.put_back(tok)
            return True
        self._parse_expression()  # Includes commas
        self._parse_required_operator(")")
        return True

    def _parse_variable_reference(self):
        tok = self._tokenizer.get_next_token()
        if self._classifier.is_operator(tok, "@") and self.language == "Less":
            tok = self._tokenizer.get_next_token()
            # Allow multiple '@' signs
            while self._classifier.is_operator(tok, "@"):
                tok = self._tokenizer.get_next_token()
            if not (self._classifier.is_attribute(tok)
                    or self._classifier.is_identifier(tok)
                    or self._classifier.is_directive(tok)):
                self._add_result("expecting an identifier", tok)
            return True
        elif self._is_scss_variable(tok):
            return True
        self._tokenizer.put_back(tok)
        return False

    def _parse_priority(self):
        tok = self._tokenizer.get_next_token()
        if self._classifier.is_important(tok, "!important"):
            return
        elif not self._classifier.is_important(tok, "!"):
            self._tokenizer.put_back(tok)
        else:
            tok = self._tokenizer.get_next_token()
            if not self._classifier.is_important(tok, "important"):
                self._add_result("expecting '!important',", tok)
                self._parser_putback_recover(tok)

    def _parse_identifier_list(self, classifier, separator):
        while True:
            tok = self._tokenizer.get_next_token()
            self._check_tag_tok(tok, 9)
            if not self._classifier.is_operator(tok, separator):
                self._tokenizer.put_back(tok)
                break
            tok = self._tokenizer.get_next_token()
            if not (classifier(tok) and self._lex_identifier(tok)):
                self._add_result("expecting an identifier", tok)
                return self._parser_putback_recover(tok)

    def _parse_top_level(self):
        self._region = self._PARSE_REGION_AT_START
        do_declarations_this_time = False  # for recovery
        while True:
            if not do_declarations_this_time:
                tok = self._tokenizer.get_next_token()
                if tok is None:
                    log.error("tok is None")
                    break
                if tok['style'] == EOF_STYLE:
                    return
                self._check_tag_tok(tok, 10)
            try:
                if do_declarations_this_time:
                    do_declarations_this_time = False
                    self._parse_declarations()
                elif self._classifier.is_operator(tok, "@"):
                    self._parse_directive(tok)
                elif self._is_scss_variable(tok):
                    self._parse_assignment()
                else:
                    self._tokenizer.put_back(tok)
                    self._region = self._PARSE_REGION_SAW_OTHER
                    self._parse_ruleset()
            except SyntaxErrorEOF:
                break
            except SyntaxError:
                tok = self._recover(allowEOF=True, opTokens=("{", "}", "@"))
                if tok['style'] == EOF_STYLE:
                    return
                if self._classifier.is_operator(tok, "{"):
                    self._tokenizer.put_back(tok)
                    # slightly convoluted way of running code in the same
                    # try/except block
                    do_declarations_this_time = True
                elif self._classifier.is_operator(tok, "@"):
                    self._tokenizer.put_back(tok)
                # Otherwise consume the "}" and continue

    _identifier_lex_re = re.compile(
        r'(?:[a-zA-Z_\-\x80-\xff]|\\[^\r\n\f0-9a-fA-F])(?:[\w_\-\x80-\xff]|\\[^\r\n\f0-9a-fA-F])*$')

    def _lex_identifier(self, tok):
        return self._identifier_lex_re.match(tok['text'])

    def _is_scss_variable(self, tok):
        if self.language != "SCSS":
            return False
        return (self._classifier.is_identifier(tok)
                and tok['text'][0] == "$")

    _check_tag_tok_count = 0

    def _check_tag_tok(self, tok, loop_id):
        tag = "_check_loop_%d" % (loop_id,)
        if tag not in tok:
...
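
Note how the linter pairs put_back with exception-based error recovery: _parser_putback_recover() returns the offending token to the tokenizer and raises SyntaxError, and the handler calls _recover() to skip ahead to a synchronization token such as '{' or '}', which is then put back so the enclosing loop re-reads it. A condensed, self-contained sketch of that idiom (MiniParser and its string tokens are hypothetical, not part of css_linter.py):

    class MiniParser:
        def __init__(self, tokens):
            self._tokens = list(reversed(tokens))
            self._pending = []  # put-back stack

        def get_next_token(self):
            return self._pending.pop() if self._pending else self._tokens.pop()

        def put_back(self, tok):
            self._pending.append(tok)

        def _parser_putback_recover(self, tok):
            self.put_back(tok)   # return the offending token to the stream
            raise SyntaxError()  # unwind to whoever can resynchronize

        def _recover(self, opTokens):
            while True:          # skip ahead to a synchronization token
                tok = self.get_next_token()
                if tok in opTokens:
                    return tok

        def parse_declaration(self):
            tok = self.get_next_token()
            if tok != "color":   # grammar check failed: put back and bail out
                self._parser_putback_recover(tok)

        def parse_block(self):
            try:
                self.parse_declaration()
            except SyntaxError:
                tok = self._recover(opTokens=("{", "}"))
                self.put_back(tok)  # the enclosing loop re-reads the sync token

    MiniParser(["bogus", ";", "}"]).parse_block()  # recovers at "}"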


sansio.py

Source: sansio.py (GitHub)


import struct

from .exceptions import SocksException

MAX_STRING_SIZE = 2 ** 10


class SansIORW:
    def __init__(self, encoding):
        self.buffer = b""
        self.encoding = encoding

    def _take_first(self, x, *, put_back=False):
        result = self.buffer[:x]
        if put_back:
            return result
        self.buffer = self.buffer[x:]
        return result

    def _read(self):
        data = yield dict(method="read")
        if not data:
            raise SocksException("Unexpected end of data")
        return data

    def read_exactly(self, count, *, put_back=False):
        while len(self.buffer) < count:
            self.buffer += yield from self._read()
        return self._take_first(count, put_back=put_back)

    def read_until(self, delimiter, *, max_size=None, put_back=False):
        while True:
            pos = self.buffer.find(delimiter)
            if max_size is not None and (pos == -1 and len(self.buffer) > max_size or pos > max_size):
                raise SocksException(f"Buffer became too long ({len(self.buffer)} > {max_size})")
            if pos != -1:
                return self._take_first(pos, put_back=put_back)
            self.buffer += yield from self._read()

    def read_struct(self, fmt, *, put_back=False):
        s = struct.Struct("!" + fmt)
        raw = yield from self.read_exactly(s.size, put_back=put_back)
        values = s.unpack(raw)
        if len(values) == 1:
            return values[0]
        return values

    def read_c_string(self, *, max_size=MAX_STRING_SIZE):
        b = yield from self.read_until(delimiter=b"\x00", max_size=max_size)
        yield from self.read_exactly(1)
        if self.encoding is None:
            return b
        return b.decode(self.encoding)

    def read_pascal_string(self):
        size = yield from self.read_struct("B")
        b = yield from self.read_exactly(size)
        if self.encoding is None:
            return b
        return b.decode(self.encoding)

    def write(self, data):
        yield dict(method="write", data=data)

    def write_struct(self, fmt, *values):
        s = struct.Struct("!" + fmt)
        yield from self.write(s.pack(*values))

    def write_c_string(self, s):
        b = s if self.encoding is None else s.encode(self.encoding)
        yield from self.write(b)
        yield from self.write(b"\x00")

    def write_pascal_string(self, s):
        b = s if self.encoding is None else s.encode(self.encoding)
        size = len(b)
        if size > 255:
            raise SocksException(f"Pascal string must be no longer than 255 characters, got {size}")
        yield from self.write_struct("B", size)
        yield from self.write(b)

    def connect(self, host, port):
        yield dict(method="connect", host=host, port=port)

    def passthrough(self):
...
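
In sansio.py, put_back is a keyword argument rather than a method: passing put_back=True makes _take_first() return the requested bytes while leaving them in the buffer, so read_exactly(), read_until(), and read_struct() double as non-consuming "peek" operations. Because each read_* helper is a generator that yields I/O requests instead of touching a socket, a small driver loop is needed to run one; here is a hypothetical sketch (drive() and the sample chunks are not part of sansio.py):

    def drive(gen, chunks):
        # Pump byte chunks into a SansIORW generator and service its requests.
        chunks = iter(chunks)
        try:
            request = next(gen)  # run until the first yielded request
            while True:
                if request["method"] == "read":
                    request = gen.send(next(chunks))  # feed the next chunk
                else:
                    request = next(gen)  # write/connect requests need no data
        except StopIteration as stop:
            return stop.value  # the generator's return value is the result

    rw = SansIORW(encoding="ascii")
    # read_c_string() stops at the NUL terminator; b"extra" stays buffered
    # for a later read_exactly()/read_until() call on the same instance.
    print(drive(rw.read_c_string(), [b"user", b"name\x00extra"]))  # username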


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run autotest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
