How to use the _total_index attribute in autotest

Best Python code snippets using autotest_python

nodeiterator.py

Source: nodeiterator.py (GitHub)


import base64
import hashlib
import json
import os
from contextlib import contextmanager
from datetime import datetime, timedelta
from lzma import LZMAError
from typing import Any, Callable, Dict, Iterable, Iterator, NamedTuple, Optional, Tuple, TypeVar
from .exceptions import AbortDownloadException, InvalidArgumentException, QueryReturnedBadRequestException
from .instaloadercontext import InstaloaderContext
FrozenNodeIterator = NamedTuple('FrozenNodeIterator',
                                [('query_hash', str),
                                 ('query_variables', Dict),
                                 ('query_referer', Optional[str]),
                                 ('context_username', Optional[str]),
                                 ('total_index', int),
                                 ('best_before', Optional[float]),
                                 ('remaining_data', Optional[Dict]),
                                 ('first_node', Optional[Dict])])
FrozenNodeIterator.query_hash.__doc__ = """The GraphQL ``query_hash`` parameter."""
FrozenNodeIterator.query_variables.__doc__ = """The GraphQL ``query_variables`` parameter."""
FrozenNodeIterator.query_referer.__doc__ = """The HTTP referer used for the GraphQL query."""
FrozenNodeIterator.context_username.__doc__ = """The username who created the iterator, or ``None``."""
FrozenNodeIterator.total_index.__doc__ = """Number of items that have already been returned."""
FrozenNodeIterator.best_before.__doc__ = """Date when parts of the stored nodes might have expired."""
FrozenNodeIterator.remaining_data.__doc__ = \
    """The already-retrieved, yet-unprocessed ``edges`` and the ``page_info`` at time of freezing."""
FrozenNodeIterator.first_node.__doc__ = """Node data of the first item, if an item has been produced."""
T = TypeVar('T')
class NodeIterator(Iterator[T]):
    """
    Iterate the nodes within edges in a GraphQL pagination. Instances of this class are returned by many (but not all)
    of Instaloader's :class:`Post`-returning functions (such as :meth:`Profile.get_posts` etc.).
    What makes this iterator special is its ability to freeze/store its current state, e.g. to interrupt an iteration,
    and later thaw/resume from where it left off.
    You can freeze a NodeIterator with :meth:`NodeIterator.freeze`::
       post_iterator = profile.get_posts()
       try:
           for post in post_iterator:
               do_something_with(post)
       except KeyboardInterrupt:
           save("resume_information.json", post_iterator.freeze())
    and later reuse it with :meth:`NodeIterator.thaw` on an equally-constructed NodeIterator::
       post_iterator = profile.get_posts()
       post_iterator.thaw(load("resume_information.json"))
    (an appropriate method to load and save the :class:`FrozenNodeIterator` is e.g.
    :func:`load_structure_from_file` and :func:`save_structure_to_file`.)
    A :class:`FrozenNodeIterator` can only be thawn with a matching NodeIterator, i.e. a NodeIterator instance that has
    been constructed with the same parameters as the instance that is represented by the :class:`FrozenNodeIterator` in
    question. This is to ensure that an iteration cannot be resumed in a wrong, unmatching loop. As a quick way to
    distinguish iterators that are saved e.g. in files, there is the :attr:`NodeIterator.magic` string: Two
    NodeIterators are matching if and only if they have the same magic.
    See also :func:`resumable_iteration` for a high-level context manager that handles a resumable iteration.
    """
    _graphql_page_length = 50
    _shelf_life = timedelta(days=29)
    def __init__(self,
                 context: InstaloaderContext,
                 query_hash: str,
                 edge_extractor: Callable[[Dict[str, Any]], Dict[str, Any]],
                 node_wrapper: Callable[[Dict], T],
                 query_variables: Optional[Dict[str, Any]] = None,
                 query_referer: Optional[str] = None,
                 first_data: Optional[Dict[str, Any]] = None):
        self._context = context
        self._query_hash = query_hash
        self._edge_extractor = edge_extractor
        self._node_wrapper = node_wrapper
        self._query_variables = query_variables if query_variables is not None else {}
        self._query_referer = query_referer
        self._page_index = 0
        self._total_index = 0
        if first_data is not None:
            self._data = first_data
            self._best_before = datetime.now() + NodeIterator._shelf_life
        else:
            self._data = self._query()
        self._first_node: Optional[Dict] = None
    def _query(self, after: Optional[str] = None) -> Dict:
        pagination_variables = {'first': NodeIterator._graphql_page_length}  # type: Dict[str, Any]
        if after is not None:
            pagination_variables['after'] = after
        try:
            data = self._edge_extractor(
                self._context.graphql_query(
                    self._query_hash, {**self._query_variables, **pagination_variables}, self._query_referer
                )
            )
            self._best_before = datetime.now() + NodeIterator._shelf_life
            return data
        except QueryReturnedBadRequestException:
            new_page_length = int(NodeIterator._graphql_page_length / 2)
            if new_page_length >= 12:
                NodeIterator._graphql_page_length = new_page_length
                self._context.error("HTTP Error 400 (Bad Request) on GraphQL Query. Retrying with shorter page length.",
                                    repeat_at_end=False)
                return self._query(after)
            else:
                raise
    def __iter__(self):
        return self
    def __next__(self) -> T:
        if self._page_index < len(self._data['edges']):
            node = self._data['edges'][self._page_index]['node']
            page_index, total_index = self._page_index, self._total_index
            try:
                self._page_index += 1
                self._total_index += 1
            except KeyboardInterrupt:
                self._page_index, self._total_index = page_index, total_index
                raise
            item = self._node_wrapper(node)
            if self._first_node is None:
                self._first_node = node
            return item
        if self._data['page_info']['has_next_page']:
            query_response = self._query(self._data['page_info']['end_cursor'])
            if self._data['edges'] != query_response['edges']:
                page_index, data = self._page_index, self._data
                try:
                    self._page_index = 0
                    self._data = query_response
                except KeyboardInterrupt:
                    self._page_index, self._data = page_index, data
                    raise
                return self.__next__()
        raise StopIteration()
    @property
    def count(self) -> Optional[int]:
        """The ``count`` as returned by Instagram. This is not always the total count this iterator will yield."""
        return self._data.get('count') if self._data is not None else None
    @property
    def total_index(self) -> int:
        """Number of items that have already been returned."""
        return self._total_index
    @property
    def magic(self) -> str:
        """Magic string for easily identifying a matching iterator file for resuming (hash of some parameters)."""
        magic_hash = hashlib.blake2b(digest_size=6)
        magic_hash.update(json.dumps(
            [self._query_hash, self._query_variables, self._query_referer, self._context.username]
        ).encode())
        return base64.urlsafe_b64encode(magic_hash.digest()).decode()
    @property
    def first_item(self) -> Optional[T]:
        """
        If this iterator has produced any items, returns the first item produced.
        .. versionadded:: 4.8
        """
        return self._node_wrapper(self._first_node) if self._first_node is not None else None
    def freeze(self) -> FrozenNodeIterator:
        """Freeze the iterator for later resuming."""
        remaining_data = None
        if self._data is not None:
            remaining_data = {**self._data,
                              'edges': (self._data['edges'][(max(self._page_index - 1, 0)):])}
        return FrozenNodeIterator(
            query_hash=self._query_hash,
            query_variables=self._query_variables,
            query_referer=self._query_referer,
            context_username=self._context.username,
            total_index=max(self.total_index - 1, 0),
            best_before=self._best_before.timestamp() if self._best_before else None,
            remaining_data=remaining_data,
            first_node=self._first_node,
        )
    def thaw(self, frozen: FrozenNodeIterator) -> None:
        """
        Use this iterator for resuming from earlier iteration.
        :raises InvalidArgumentException:
           If
            - the iterator on which this method is called has already been used, or
            - the given :class:`FrozenNodeIterator` does not match, i.e. belongs to a different iteration.
        """
        if self._total_index or self._page_index:
            raise InvalidArgumentException("thaw() called on already-used iterator.")
        if (self._query_hash != frozen.query_hash or
                self._query_variables != frozen.query_variables or
                self._query_referer != frozen.query_referer or
                self._context.username != frozen.context_username):
            raise InvalidArgumentException("Mismatching resume information.")
        if not frozen.best_before:
            raise InvalidArgumentException("\"best before\" date missing.")
        if frozen.remaining_data is None:
            raise InvalidArgumentException("\"remaining_data\" missing.")
        self._total_index = frozen.total_index
        self._best_before = datetime.fromtimestamp(frozen.best_before)
        self._data = frozen.remaining_data
        if frozen.first_node is not None:
            self._first_node = frozen.first_node
@contextmanager
def resumable_iteration(context: InstaloaderContext,
                        iterator: Iterable,
                        load: Callable[[InstaloaderContext, str], Any],
                        save: Callable[[FrozenNodeIterator, str], None],
                        format_path: Callable[[str], str],
                        check_bbd: bool = True,
                        enabled: bool = True) -> Iterator[Tuple[bool, int]]:
    """
    High-level context manager to handle a resumable iteration that can be interrupted
    with a :class:`KeyboardInterrupt` or an :class:`AbortDownloadException`.
    It can be used as follows to automatically load a previously-saved state into the iterator, save the iterator's
    state when interrupted, and delete the resume file upon completion::
       post_iterator = profile.get_posts()
       with resumable_iteration(
               context=L.context,
               iterator=post_iterator,
               load=lambda _, path: FrozenNodeIterator(**json.load(open(path))),
               save=lambda fni, path: json.dump(fni._asdict(), open(path, 'w')),
               format_path=lambda magic: "resume_info_{}.json".format(magic)
       ) as (is_resuming, start_index):
           for post in post_iterator:
               do_something_with(post)
    It yields a tuple (is_resuming, start_index).
    When the passed iterator is not a :class:`NodeIterator`, it behaves as if ``resumable_iteration`` was not used,
    just executing the inner body.
    :param context: The :class:`InstaloaderContext`.
    :param iterator: The fresh :class:`NodeIterator`.
    :param load: Loads a FrozenNodeIterator from given path. The object is ignored if it has a different type.
    :param save: Saves the given FrozenNodeIterator to the given path.
    :param format_path: Returns the path to the resume file for the given magic.
    :param check_bbd: Whether to check the best before date and reject an expired FrozenNodeIterator.
    :param enabled: Set to False to disable all functionality and simply execute the inner body.
    .. versionchanged:: 4.7
       Also interrupt on :class:`AbortDownloadException`.
    """
    if not enabled or not isinstance(iterator, NodeIterator):
        yield False, 0
        return
    is_resuming = False
    start_index = 0
    resume_file_path = format_path(iterator.magic)
    resume_file_exists = os.path.isfile(resume_file_path)
    if resume_file_exists:
        try:
            fni = load(context, resume_file_path)
            if not isinstance(fni, FrozenNodeIterator):
                raise InvalidArgumentException("Invalid type.")
            if check_bbd and fni.best_before and datetime.fromtimestamp(fni.best_before) < datetime.now():
                raise InvalidArgumentException("\"Best before\" date exceeded.")
            iterator.thaw(fni)
            is_resuming = True
            start_index = iterator.total_index
            context.log("Resuming from {}.".format(resume_file_path))
        except (InvalidArgumentException, LZMAError, json.decoder.JSONDecodeError) as exc:
            context.error("Warning: Not resuming from {}: {}".format(resume_file_path, exc))
    try:
        yield is_resuming, start_index
    except (KeyboardInterrupt, AbortDownloadException):
        if os.path.dirname(resume_file_path):
            os.makedirs(os.path.dirname(resume_file_path), exist_ok=True)
        save(iterator.freeze(), resume_file_path)
        context.log("\nSaved resume information to {}.".format(resume_file_path))
        raise
    if resume_file_exists:
        os.unlink(resume_file_path)
...
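In this snippet, `_total_index` counts how many nodes the iterator has already yielded; it is exposed through the `total_index` property, stored in a `FrozenNodeIterator` by `freeze()`, and restored by `thaw()`, so a resumed download can report where it left off. The sketch below shows one way this could be driven from user code. It is only a sketch: it assumes the module above is Instaloader's `nodeiterator.py` (importable from the `instaloader` package), and the profile name and resume-file path are hypothetical placeholders.

import json
import os
import instaloader
from instaloader.nodeiterator import FrozenNodeIterator

RESUME_FILE = "resume_information.json"  # hypothetical file name

L = instaloader.Instaloader()
profile = instaloader.Profile.from_username(L.context, "some_profile")  # hypothetical username
post_iterator = profile.get_posts()

# Resume from an earlier, interrupted run if a resume file is present.
if os.path.isfile(RESUME_FILE):
    with open(RESUME_FILE) as f:
        post_iterator.thaw(FrozenNodeIterator(**json.load(f)))
    print("Resuming after {} already-returned posts.".format(post_iterator.total_index))

try:
    for post in post_iterator:
        print(post.shortcode)
except KeyboardInterrupt:
    # total_index (backed by _total_index) counts the posts already returned.
    with open(RESUME_FILE, "w") as f:
        json.dump(post_iterator.freeze()._asdict(), f)
    print("Interrupted after {} posts; resume information saved.".format(post_iterator.total_index))

This is essentially what `resumable_iteration` wraps up for you: it derives the resume-file name from `NodeIterator.magic`, thaws on entry, reports `total_index` as `start_index`, and freezes on `KeyboardInterrupt` or `AbortDownloadException`.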


test_testpoint.py

Source: test_testpoint.py (GitHub)


# ctrl -j new line and indent
# ctrl-c ctrl-j
# longlines-mode

import unittest


class Serial:
    def __init__(self, values):
        self.values = values
        self._index = 0

    def read(self, num):
        v = self.values[self._index:self._index + num]
        self._index += num
        return v


class TestPoint:
    def __init__(self, serial, pace):
        self.serial = serial
        self.pace = pace
        self.bind = 0
        self._total_index = 0

    def next1(self):
        self.buffer = self.serial.read(self.pace)
        v = self.buffer[0]
        self._total_index = 0
        return self._next(v)

    def next2(self):
        self.bind = 0
        self.buffer = self.serial.read(self.pace)
        self._total_index += self.pace
        v = self.buffer[self.bind]
        return self._next(v)

    def _next(self, v):
        isMarker = (self.bind % 5) == 0
        return (v, isMarker)


class Test(unittest.TestCase):
    def test1(self):
        self.assertEquals(1, 1)

    def test_same(self):
        #!0 1 2 3 4!5 6 7
        serial = Serial([1, 1, 1, 1, 1, 1, 1, 1])
        tp = TestPoint(serial, pace=5)
        self.assertEquals((1, True), tp.next1())

        self.assertEquals((1, True), tp.next2())
        self.assertEquals(5, tp._total_index)


if __name__ == '__main__': ...
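Here `_total_index` tracks how many values have been consumed from the serial stream in `pace`-sized chunks: `next1()` resets it to 0, while each `next2()` call adds `pace`, which is exactly what `test_same` asserts. A small sketch (hypothetical input values, assuming the `Serial` and `TestPoint` classes above are in scope) makes the counter's behaviour visible:

serial = Serial(list(range(20)))        # 20 dummy readings: 0, 1, 2, ...
tp = TestPoint(serial, pace=5)

print(tp.next1(), tp._total_index)      # (0, True) 0  -> next1() resets the counter
print(tp.next2(), tp._total_index)      # (5, True) 5  -> next2() advances it by pace
print(tp.next2(), tp._total_index)      # (10, True) 10

The test case itself can be run with, for example, `python -m unittest test_testpoint` from the directory containing the file.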


mean_data_point.py

Source: mean_data_point.py (GitHub)


import numpy as np


class MeanDataPoint:
    def __init__(self, mean_size_: int = None):
        # mean filter
        default_ = 5
        self._mean_size = mean_size_ if mean_size_ is not None else default_

        # data collection
        self._data_collect = np.empty(self._mean_size, dtype=object)
        self._total_index = 0
        self._cur_index = 0

    def set_mean_size(self, mean_size_: int):
        self._mean_size = mean_size_

    def push_raw_data(self, raw_dat_):
        # update data collection
        if self._data_collect.size < self._mean_size:
            np.append(self._data_collect, raw_dat_)
        else:
            self._data_collect[self._cur_index] = raw_dat_
        # update index
        self._total_index += 1
        self._cur_index = self._total_index % self._mean_size

    def get_filter_value(self) -> float:
        mean_ = None
        range_index_ = self._mean_size
        if self._total_index < self._mean_size:
            range_index_ = self._total_index
        if range_index_ > 0:
            mean_ = np.mean(self._data_collect[:range_index_])
        return mean_
...
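In this class `_total_index` counts every sample ever pushed, while `_cur_index` wraps around the fixed-size buffer; `get_filter_value()` uses `_total_index` to average only the slots that have actually been filled during warm-up. A short usage sketch (hypothetical sample values, assuming `MeanDataPoint` as defined above is importable) shows the moving mean:

filt = MeanDataPoint(mean_size_=3)
for value in [10.0, 20.0, 30.0, 40.0]:
    filt.push_raw_data(value)
    print(filt._total_index, filt.get_filter_value())
# Prints: 1 10.0, 2 15.0, 3 20.0, 4 30.0 -- once the buffer is full,
# only the last mean_size_ samples contribute to the mean.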


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run autotest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now and get 100 minutes of automation testing free!

