How to use the _assert_batch method in LocalStack

Best Python code snippets using localstack_python
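In the LocalStack SQS provider excerpted below, _assert_batch is the guard that every batch operation (send_message_batch, delete_message_batch, change_message_visibility_batch) runs before it touches a queue: it raises EmptyBatchRequest when the entry list is empty and BatchEntryIdsNotDistinct when two entries share the same Id. The second and third snippets show unrelated helpers of the same name from other projects, included for comparison.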

provider.py

Source: provider.py (GitHub)



...
        entries: ChangeMessageVisibilityBatchRequestEntryList,
    ) -> ChangeMessageVisibilityBatchResult:
        queue = self._resolve_queue(context, queue_url=queue_url)
        self._assert_permission(context, queue)
        self._assert_batch(entries)
        successful = []
        failed = []
        with queue.mutex:
            for entry in entries:
                try:
                    queue.update_visibility_timeout(
                        entry["ReceiptHandle"], entry["VisibilityTimeout"]
                    )
                    successful.append({"Id": entry["Id"]})
                except Exception as e:
                    failed.append(
                        BatchResultErrorEntry(
                            Id=entry["Id"],
                            SenderFault=False,
                            Code=e.__class__.__name__,
                            Message=str(e),
                        )
                    )
        return ChangeMessageVisibilityBatchResult(
            Successful=successful,
            Failed=failed,
        )

    def delete_queue(self, context: RequestContext, queue_url: String) -> None:
        with self._mutex:
            queue = self._resolve_queue(context, queue_url=queue_url)
            self._assert_permission(context, queue)
            del self.queues[queue.key]

    def get_queue_attributes(
        self, context: RequestContext, queue_url: String, attribute_names: AttributeNameList = None
    ) -> GetQueueAttributesResult:
        queue = self._resolve_queue(context, queue_url=queue_url)
        self._assert_permission(context, queue)
        if not attribute_names:
            return GetQueueAttributesResult(Attributes={})
        if QueueAttributeName.All in attribute_names:
            # return GetQueueAttributesResult(Attributes=queue.attributes)
            attribute_names = queue.attributes.keys()
        result: Dict[QueueAttributeName, str] = {}
        for attr in attribute_names:
            try:
                getattr(QueueAttributeName, attr)
            except AttributeError:
                raise InvalidAttributeName(f"Unknown attribute {attr}.")
            if callable(queue.attributes.get(attr)):
                func = queue.attributes.get(attr)
                result[attr] = func()
            else:
                result[attr] = queue.attributes.get(attr)
        return GetQueueAttributesResult(Attributes=result)

    def send_message(
        self,
        context: RequestContext,
        queue_url: String,
        message_body: String,
        delay_seconds: Integer = None,
        message_attributes: MessageBodyAttributeMap = None,
        message_system_attributes: MessageBodySystemAttributeMap = None,
        message_deduplication_id: String = None,
        message_group_id: String = None,
    ) -> SendMessageResult:
        queue = self._resolve_queue(context, queue_url=queue_url)
        self._assert_permission(context, queue)
        message = self._put_message(
            queue,
            context,
            message_body,
            delay_seconds,
            message_attributes,
            message_system_attributes,
            message_deduplication_id,
            message_group_id,
        )
        return SendMessageResult(
            MessageId=message["MessageId"],
            MD5OfMessageBody=message["MD5OfBody"],
            MD5OfMessageAttributes=message.get("MD5OfMessageAttributes"),
            SequenceNumber=queue.generate_sequence_number(),
            MD5OfMessageSystemAttributes=_create_message_attribute_hash(message_system_attributes),
        )

    def send_message_batch(
        self, context: RequestContext, queue_url: String, entries: SendMessageBatchRequestEntryList
    ) -> SendMessageBatchResult:
        queue = self._resolve_queue(context, queue_url=queue_url)
        self._assert_permission(context, queue)
        self._assert_batch(entries)
        successful = []
        failed = []
        with queue.mutex:
            for entry in entries:
                try:
                    message = self._put_message(
                        queue,
                        context,
                        message_body=entry.get("MessageBody"),
                        delay_seconds=entry.get("DelaySeconds"),
                        message_attributes=entry.get("MessageAttributes"),
                        message_system_attributes=entry.get("MessageSystemAttributes"),
                        message_deduplication_id=entry.get("MessageDeduplicationId"),
                        message_group_id=entry.get("MessageGroupId"),
                    )
                    successful.append(
                        SendMessageBatchResultEntry(
                            Id=entry["Id"],
                            MessageId=message.get("MessageId"),
                            MD5OfMessageBody=message.get("MD5OfBody"),
                            MD5OfMessageAttributes=message.get("MD5OfMessageAttributes"),
                            MD5OfMessageSystemAttributes=_create_message_attribute_hash(
                                message.get("message_system_attributes")
                            ),
                            SequenceNumber=queue.generate_sequence_number(),
                        )
                    )
                except Exception as e:
                    failed.append(
                        BatchResultErrorEntry(
                            Id=entry["Id"],
                            SenderFault=False,
                            Code=e.__class__.__name__,
                            Message=str(e),
                        )
                    )
        return SendMessageBatchResult(
            Successful=successful,
            Failed=failed,
        )

    def _put_message(
        self,
        queue: SqsQueue,
        context: RequestContext,
        message_body: String,
        delay_seconds: Integer = None,
        message_attributes: MessageBodyAttributeMap = None,
        message_system_attributes: MessageBodySystemAttributeMap = None,
        message_deduplication_id: String = None,
        message_group_id: String = None,
    ) -> Message:
        # TODO: default message attributes (SenderId, ApproximateFirstReceiveTimestamp, ...)
        check_message_content(message_body)
        check_attributes(message_attributes)
        check_attributes(message_system_attributes)
        check_fifo_id(message_deduplication_id)
        check_fifo_id(message_group_id)
        message: Message = Message(
            MessageId=generate_message_id(),
            MD5OfBody=md5(message_body),
            Body=message_body,
            Attributes=self._create_message_attributes(context, message_system_attributes),
            MD5OfMessageAttributes=_create_message_attribute_hash(message_attributes),
            MessageAttributes=message_attributes,
        )
        delay_seconds = delay_seconds or queue.attributes.get(QueueAttributeName.DelaySeconds, "0")
        if int(delay_seconds):
            # FIXME: this is a pretty bad implementation (one thread per message...). polling on a priority queue
            # would probably be better. We also need access to delayed messages for the
            # ApproximateNumberOfDelayedMessages attribute.
            threading.Timer(
                int(delay_seconds),
                queue.put,
                args=(message, message_deduplication_id, message_group_id),
            ).start()
        else:
            queue.put(
                message=message,
                message_deduplication_id=message_deduplication_id,
                message_group_id=message_group_id,
            )
        return message

    def receive_message(
        self,
        context: RequestContext,
        queue_url: String,
        attribute_names: AttributeNameList = None,
        message_attribute_names: MessageAttributeNameList = None,
        max_number_of_messages: Integer = None,
        visibility_timeout: Integer = None,
        wait_time_seconds: Integer = None,
        receive_request_attempt_id: String = None,
    ) -> ReceiveMessageResult:
        queue = self._resolve_queue(context, queue_url=queue_url)
        self._assert_permission(context, queue)
        num = max_number_of_messages or 1
        block = wait_time_seconds is not None
        # collect messages
        messages = []
        while num:
            try:
                standard_message = queue.get(
                    block=block, timeout=wait_time_seconds, visibility_timeout=visibility_timeout
                )
                msg = standard_message.message
            except Empty:
                break
            moved_to_dlq = False
            if (
                queue.attributes
                and queue.attributes.get(QueueAttributeName.RedrivePolicy) is not None
            ):
                moved_to_dlq = self._dead_letter_check(queue, standard_message, context)
            if moved_to_dlq:
                continue
            # filter attributes
            if message_attribute_names:
                if "All" not in message_attribute_names:
                    msg["MessageAttributes"] = {
                        k: v
                        for k, v in msg["MessageAttributes"].items()
                        if k in message_attribute_names
                    }
                # TODO: why is this called even if we receive "All" attributes?
                msg["MD5OfMessageAttributes"] = _create_message_attribute_hash(
                    msg["MessageAttributes"]
                )
            else:
                del msg["MessageAttributes"]
            # add message to result
            messages.append(msg)
            num -= 1
        # TODO: how does receiving behave if the queue was deleted in the meantime?
        return ReceiveMessageResult(Messages=messages)

    def _dead_letter_check(
        self, queue: SqsQueue, std_m: SqsMessage, context: RequestContext
    ) -> bool:
        redrive_policy = json.loads(queue.attributes.get(QueueAttributeName.RedrivePolicy))
        # TODO: include the names of the dictionary sub-attributes in the autogenerated code?
        max_receive_count = redrive_policy["maxReceiveCount"]
        if std_m.receive_times > max_receive_count:
            dead_letter_target_arn = redrive_policy["deadLetterTargetArn"]
            dl_queue = self._require_queue_by_arn(dead_letter_target_arn)
            # TODO: this needs to be atomic?
            dead_message = std_m.message
            dl_queue.put(message=dead_message)
            queue.remove(std_m.message["ReceiptHandle"])
            return True
        else:
            return False

    def delete_message(
        self, context: RequestContext, queue_url: String, receipt_handle: String
    ) -> None:
        queue = self._resolve_queue(context, queue_url=queue_url)
        self._assert_permission(context, queue)
        queue.remove(receipt_handle)

    def delete_message_batch(
        self,
        context: RequestContext,
        queue_url: String,
        entries: DeleteMessageBatchRequestEntryList,
    ) -> DeleteMessageBatchResult:
        queue = self._resolve_queue(context, queue_url=queue_url)
        self._assert_permission(context, queue)
        self._assert_batch(entries)
        successful = []
        failed = []
        with queue.mutex:
            for entry in entries:
                try:
                    queue.remove(entry["ReceiptHandle"])
                    successful.append(DeleteMessageBatchResultEntry(Id=entry["Id"]))
                except Exception as e:
                    failed.append(
                        BatchResultErrorEntry(
                            Id=entry["Id"],
                            SenderFault=False,
                            Code=e.__class__.__name__,
                            Message=str(e),
                        )
                    )
        return DeleteMessageBatchResult(
            Successful=successful,
            Failed=failed,
        )

    def purge_queue(self, context: RequestContext, queue_url: String) -> None:
        queue = self._resolve_queue(context, queue_url=queue_url)
        self._assert_permission(context, queue)
        with self._mutex:
            # FIXME: use queue-specific locks
            if queue.purge_in_progress:
                raise PurgeQueueInProgress()
            queue.purge_in_progress = True
        # TODO: how do other methods behave when purge is in progress?
        try:
            while True:
                queue.visible.get_nowait()
        except Empty:
            return
        finally:
            queue.purge_in_progress = False

    def set_queue_attributes(
        self, context: RequestContext, queue_url: String, attributes: QueueAttributeMap
    ) -> None:
        queue = self._resolve_queue(context, queue_url=queue_url)
        if not attributes:
            return
        queue.validate_queue_attributes(attributes)
        for k, v in attributes.items():
            queue.attributes[k] = v
        # Special cases
        if queue.attributes.get(QueueAttributeName.Policy) == "":
            del queue.attributes[QueueAttributeName.Policy]
        redrive_policy = queue.attributes.get(QueueAttributeName.RedrivePolicy)
        if redrive_policy:
            _redrive_policy = json.loads(redrive_policy)
            dl_target_arn = _redrive_policy.get("deadLetterTargetArn")
            max_receive_count = _redrive_policy.get("maxReceiveCount")
            # TODO: use the actual AWS responses
            if not dl_target_arn:
                raise InvalidParameterValue(
                    "The required parameter 'deadLetterTargetArn' is missing"
                )
            if not max_receive_count:
                raise InvalidParameterValue("The required parameter 'maxReceiveCount' is missing")
            try:
                max_receive_count = int(max_receive_count)
                valid_count = 1 <= max_receive_count <= 1000
            except ValueError:
                valid_count = False
            if not valid_count:
                raise InvalidParameterValue(
                    f"Value {redrive_policy} for parameter RedrivePolicy is invalid. Reason: Invalid value for maxReceiveCount: {max_receive_count}, valid values are from 1 to 1000 both inclusive."
                )

    def tag_queue(self, context: RequestContext, queue_url: String, tags: TagMap) -> None:
        queue = self._resolve_queue(context, queue_url=queue_url)
        self._assert_permission(context, queue)
        if not tags:
            return
        for k, v in tags.items():
            queue.tags[k] = v

    def list_queue_tags(self, context: RequestContext, queue_url: String) -> ListQueueTagsResult:
        queue = self._resolve_queue(context, queue_url=queue_url)
        self._assert_permission(context, queue)
        return ListQueueTagsResult(Tags=queue.tags)

    def untag_queue(self, context: RequestContext, queue_url: String, tag_keys: TagKeyList) -> None:
        queue = self._resolve_queue(context, queue_url=queue_url)
        self._assert_permission(context, queue)
        for k in tag_keys:
            if k in queue.tags:
                del queue.tags[k]

    def add_permission(
        self,
        context: RequestContext,
        queue_url: String,
        label: String,
        aws_account_ids: AWSAccountIdList,
        actions: ActionNameList,
    ) -> None:
        queue = self._resolve_queue(context, queue_url=queue_url)
        self._assert_permission(context, queue)
        self._validate_actions(actions)
        for account_id in aws_account_ids:
            for action in actions:
                queue.permissions.add(Permission(label, account_id, action))

    def remove_permission(self, context: RequestContext, queue_url: String, label: String) -> None:
        queue = self._resolve_queue(context, queue_url=queue_url)
        self._assert_permission(context, queue)
        candidates = [p for p in queue.permissions if p.label == label]
        if candidates:
            queue.permissions.remove(candidates[0])

    def _create_message_attributes(
        self,
        context: RequestContext,
        message_system_attributes: MessageBodySystemAttributeMap = None,
    ) -> Dict[MessageSystemAttributeName, str]:
        result: Dict[MessageSystemAttributeName, str] = {
            MessageSystemAttributeName.SenderId: context.account_id,
            MessageSystemAttributeName.SentTimestamp: str(now()),
        }
        if message_system_attributes is not None:
            for attr in message_system_attributes:
                result[attr] = message_system_attributes[attr]["StringValue"]
        return result

    def _validate_queue_attributes(self, attributes: QueueAttributeMap):
        valid = [k[1] for k in inspect.getmembers(QueueAttributeName)]
        for k in attributes.keys():
            if k not in valid:
                raise InvalidAttributeName("Unknown Attribute %s" % k)

    def _validate_actions(self, actions: ActionNameList):
        service = load_service(service=self.service, version=self.version)
        # FIXME: this is a bit of a heuristic as it will also include actions like "ListQueues" which is not
        # associated with an action on a queue
        valid = list(service.operation_names)
        valid.append("*")
        for action in actions:
            if action not in valid:
                raise InvalidParameterValue(
                    f"Value SQS:{action} for parameter ActionName is invalid. Reason: Please refer to the appropriate "
                    "WSDL for a list of valid actions. "
                )

    def _assert_permission(self, context: RequestContext, queue: SqsQueue):
        action = context.operation.name
        account_id = context.account_id
        if account_id == queue.owner:
            return
        for permission in queue.permissions:
            if permission.account_id != account_id:
                continue
            if permission.action == "*":
                return
            if permission.action == action:
                return
        raise CommonServiceException("AccessDeniedException", "Not allowed")

    def _assert_batch(self, batch: List):
        if not batch:
            raise EmptyBatchRequest
        visited = set()
        for entry in batch:
            # TODO: InvalidBatchEntryId
            if entry["Id"] in visited:
                raise BatchEntryIdsNotDistinct()
            else:
                visited.add(entry["Id"])


def _create_mock_sequence_number():
    return "".join(random.choice(string.digits) for _ in range(20))


# Method from moto's attribute_md5 of moto/sqs/models.py, separated from the Message Object
def _create_message_attribute_hash(message_attributes) -> Optional[str]:
    # To avoid the need to check for dict conformity every time we invoke this function...


target2d_utils.py

Source: target2d_utils.py (GitHub)



...
    """
    dtype = _get_dtype(dtype)
    out = torch.stack([torch.eye(3, dtype=dtype)] * batch_size)
    if pos is not None:
        _assert_batch(batch_size, pos, ndim=2, msg="make_transform2d(pos= ")
        out[..., -1] = expand_vec2d(pos, dims=2, dtype=dtype, homogeneous=True)[..., -1]
    if scale is not None:
        _assert_batch(batch_size, scale, ndim=2, msg="make_transform2d(scale= ")
        out = out*expand_vec2d(scale, dims=2, dtype=dtype, homogeneous=True)
    if angle is not None:
        _assert_batch(batch_size, angle, ndim=1, msg="make_transform2d(angle= ")
        out = make_rotation(angle, mode) @ out
    if repos is not None:
        _assert_batch(batch_size, pos, ndim=2, msg="make_transform2d(repos= ")
        out[..., -1] += expand_vec2d(repos, dims=2, dtype=dtype, homogeneous=True)[..., -1]
    return out.unsqueeze(1)


def _assert_batch(batch_size, data, ndim=2, msg=""):
    """ could fail if data passed as list"""
    if torch.is_tensor(data):
        if data.ndim == ndim:
            assert data.shape[0] in (1, batch_size), f"{msg}mismatch, batch size got {data.shape[0]}, expected {batch_size}"
        assert data.ndim <= ndim, f"too many dimensions {data.ndim} expected <= {ndim}"


def make_rotation(val, mode='xpath', homogeneous=True):
    """ make batch of counterclockwise rotations
    returns tensor shape (n, 3,3) where n = len(val)
    """
    if isinstance(val, (int, float)) or (torch.is_tensor(val) and val.ndim == 0):
        val = [val]
    cos = torch.cos(torch.as_tensor(val))
    sin = (1 if mode[0] == 'y' else -1)*torch.sin(torch.as_tensor(val))
    out = []
...
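Here _assert_batch is a plain tensor-shape guard rather than an SQS check. The sketch below re-implements the same idea so it can be run on its own; it is illustrative only (it mirrors, but does not import, the original module) and assumes PyTorch is installed.

import torch


def _assert_batch(batch_size, data, ndim=2, msg=""):
    # Accept tensors whose leading dimension is either 1 (broadcastable) or batch_size.
    if torch.is_tensor(data):
        if data.ndim == ndim:
            assert data.shape[0] in (1, batch_size), (
                f"{msg}mismatch, batch size got {data.shape[0]}, expected {batch_size}"
            )
        assert data.ndim <= ndim, f"too many dimensions {data.ndim} expected <= {ndim}"


_assert_batch(4, torch.zeros(4, 2))  # passes: batch dimension matches
_assert_batch(4, torch.zeros(1, 2))  # passes: single entry, broadcastable
try:
    _assert_batch(4, torch.zeros(3, 2), msg="make_transform2d(pos= ")
except AssertionError as err:
    print(err)  # make_transform2d(pos= mismatch, batch size got 3, expected 4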


test_tools.py

Source: test_tools.py (GitHub)



...
            datasets=[DataSet("X01", 1)],
            results=[],
        )
    ]

    def _assert_batch(
        self,
        project: Project,
        batch: BatchFile,
        filename_regexp: str,
        body_regexp: str,
    ):
        # smoke tests on file name
        self.assertTrue(batch._filename.startswith(str(project.scripts_dir)))
        self.assertRegex(batch._filename, filename_regexp)
        # smoke test on script body
        self.assertRegex(batch._body, body_regexp)

    @db_session
    def test_xdsapp(self):
        dataset = self.project.get_dataset(1)
        options = ProcessOptions(None, None)
        batch = generate_process_batch(Tool.XDSAPP, self.project, dataset, options)
        self._assert_batch(self.project, batch, "xdsapp.*sh", "xdsapp")

    @db_session
    def test_xds(self):
        dataset = self.project.get_dataset(1)
        options = ProcessOptions(None, None)
        batch = generate_process_batch(Tool.XDS, self.project, dataset, options)
        self._assert_batch(self.project, batch, "xds.*sh", r"xia2.*pipeline\=3dii")

    @db_session
    def test_dials(self):
        dataset = self.project.get_dataset(1)
        options = ProcessOptions(None, None)
        batch = generate_process_batch(Tool.DIALS, self.project, dataset, options)
...
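In this test suite, _assert_batch is a reusable smoke-test helper that applies regex assertions to a generated batch script. The self-contained sketch below shows the same pattern with SimpleNamespace stand-ins for the Project and BatchFile objects, whose real definitions are not part of the excerpt; the file path and script body are hypothetical.

import unittest
from types import SimpleNamespace


class BatchScriptTest(unittest.TestCase):
    def _assert_batch(self, project, batch, filename_regexp, body_regexp):
        # smoke tests on the file name
        self.assertTrue(batch._filename.startswith(str(project.scripts_dir)))
        self.assertRegex(batch._filename, filename_regexp)
        # smoke test on the script body
        self.assertRegex(batch._body, body_regexp)

    def test_xdsapp_like(self):
        project = SimpleNamespace(scripts_dir="/data/project/scripts")
        batch = SimpleNamespace(
            _filename="/data/project/scripts/xdsapp_run.sh",
            _body="module load XDSAPP\nxdsapp --cmd ...\n",
        )
        self._assert_batch(project, batch, r"xdsapp.*sh", "xdsapp")


if __name__ == "__main__":
    unittest.main()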



