How to use _put_records_to_s3_bucket method in localstack

The following Python code snippet, taken from the LocalStack codebase (localstack_python), shows the _put_records_to_s3_bucket method in context.
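Before looking at the implementation, here is a hedged sketch of how the method is typically exercised: not by calling it directly, but by creating a delivery stream with an S3 destination through the public Firehose API, after which LocalStack's provider calls _put_records_to_s3_bucket internally to write the record payloads into the bucket. The endpoint URL, bucket, stream, role and prefix names below are illustrative assumptions, as are the dummy credentials; they are not taken from the snippet itself.

# Hedged end-to-end sketch: drive _put_records_to_s3_bucket indirectly through the
# public Firehose API against a running LocalStack instance (assumed on localhost:4566).
import json

import boto3

ENDPOINT = "http://localhost:4566"
common = dict(
    endpoint_url=ENDPOINT,
    region_name="us-east-1",
    aws_access_key_id="test",       # dummy credentials accepted by LocalStack
    aws_secret_access_key="test",
)

s3 = boto3.client("s3", **common)
firehose = boto3.client("firehose", **common)

s3.create_bucket(Bucket="firehose-backup")

firehose.create_delivery_stream(
    DeliveryStreamName="demo-stream",
    DeliveryStreamType="DirectPut",
    S3DestinationConfiguration={
        # The role ARN is a placeholder; LocalStack does not validate it.
        "RoleARN": "arn:aws:iam::000000000000:role/firehose-role",
        "BucketARN": "arn:aws:s3:::firehose-backup",
        "Prefix": "raw/",
    },
)

# Each record's payload ends up base64-decoded, concatenated and written as one S3 object.
firehose.put_record_batch(
    DeliveryStreamName="demo-stream",
    Records=[{"Data": json.dumps({"id": i}).encode() + b"\n"} for i in range(3)],
)

keys = [o["Key"] for o in s3.list_objects_v2(Bucket="firehose-backup").get("Contents", [])]
print("objects written:", keys)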

provider.py

Source: provider.py (GitHub)


...
                unprocessed_records,
            )
        if "S3DestinationDescription" in destination:
            s3_dest_desc = destination["S3DestinationDescription"]
            self._put_records_to_s3_bucket(delivery_stream_name, records, s3_dest_desc)
        if "HttpEndpointDestinationDescription" in destination:
            http_dest = destination["HttpEndpointDestinationDescription"]
            end_point = http_dest["EndpointConfiguration"]
            url = end_point["Url"]
            record_to_send = {
                "requestId": str(uuid.uuid4()),
                "timestamp": (int(time.time())),
                "records": [],
            }
            for record in records:
                data = record.get("Data") or record.get("data")
                record_to_send["records"].append({"data": to_str(data)})
            headers = {
                "Content-Type": "application/json",
            }
            try:
                requests.post(url, json=record_to_send, headers=headers)
            except Exception as e:
                LOG.exception(f"Unable to put Firehose records to HTTP endpoint {url}.")
                raise e
    return [
        PutRecordBatchResponseEntry(RecordId=str(uuid.uuid4())) for _ in unprocessed_records
    ]

def _put_to_search_db(
    self, db_flavor, db_description, delivery_stream_name, records, unprocessed_records
):
    """
    sends Firehose records to an ElasticSearch or Opensearch database
    """
    search_db_index = db_description["IndexName"]
    search_db_type = db_description.get("TypeName")
    region = aws_stack.get_region()
    domain_arn = db_description.get("DomainARN")
    cluster_endpoint = db_description.get("ClusterEndpoint")
    if cluster_endpoint is None:
        cluster_endpoint = aws_stack.get_opensearch_endpoint(domain_arn)
    db_connection = get_search_db_connection(cluster_endpoint, region)
    if db_description.get("S3BackupMode") == ElasticsearchS3BackupMode.AllDocuments:
        s3_dest_desc = db_description.get("S3DestinationDescription")
        if s3_dest_desc:
            try:
                self._put_records_to_s3_bucket(
                    stream_name=delivery_stream_name,
                    records=unprocessed_records,
                    s3_destination_description=s3_dest_desc,
                )
            except Exception as e:
                LOG.warning("Unable to backup unprocessed records to S3. Error: %s", e)
        else:
            LOG.warning("Passed S3BackupMode without S3Configuration. Cannot backup...")
    elif db_description.get("S3BackupMode") == ElasticsearchS3BackupMode.FailedDocumentsOnly:
        # TODO support FailedDocumentsOnly as well
        LOG.warning("S3BackupMode FailedDocumentsOnly is set but currently not supported.")
    for record in records:
        obj_id = uuid.uuid4()
        data = "{}"
        # DirectPut
        if "Data" in record:
            data = base64.b64decode(record["Data"])
        # KinesisAsSource
        elif "data" in record:
            data = base64.b64decode(record["data"])
        try:
            body = json.loads(data)
        except Exception as e:
            LOG.warning(f"{db_flavor} only allows json input data!")
            raise e
        LOG.debug(
            "Publishing to {} destination. Data: {}".format(
                db_flavor, truncate(data, max_length=300)
            )
        )
        try:
            db_connection.create(
                index=search_db_index, doc_type=search_db_type, id=obj_id, body=body
            )
        except Exception as e:
            LOG.exception(f"Unable to put record to stream {delivery_stream_name}.")
            raise e

def _add_missing_record_attributes(self, records: List[Dict]) -> None:
    def _get_entry(obj, key):
        return obj.get(key) or obj.get(first_char_to_lower(key))

    for record in records:
        if not _get_entry(record, "ApproximateArrivalTimestamp"):
            record["ApproximateArrivalTimestamp"] = int(now_utc(millis=True))
        if not _get_entry(record, "KinesisRecordMetadata"):
            record["kinesisRecordMetadata"] = {
                "shardId": "shardId-000000000000",
                # not really documented what AWS is using internally - simply using a random UUID here
                "partitionKey": str(uuid.uuid4()),
                "approximateArrivalTimestamp": timestamp(
                    float(_get_entry(record, "ApproximateArrivalTimestamp")) / 1000,
                    format=TIMESTAMP_FORMAT_MICROS,
                ),
                "sequenceNumber": next_sequence_number(),
                "subsequenceNumber": "",
            }

def _preprocess_records(self, processor: Dict, records: List[Record]) -> List[Dict]:
    """Preprocess the list of records by calling the given processor (e.g., Lambda function)."""
    proc_type = processor.get("Type")
    parameters = processor.get("Parameters", [])
    parameters = {p["ParameterName"]: p["ParameterValue"] for p in parameters}
    if proc_type == "Lambda":
        lambda_arn = parameters.get("LambdaArn")
        # TODO: add support for other parameters, e.g., NumberOfRetries, BufferSizeInMBs, BufferIntervalInSeconds, ...
        client = aws_stack.connect_to_service("lambda")
        records = keys_to_lower(records)
        # Convert the record data to string (for json serialization)
        for record in records:
            if "data" in record:
                record["data"] = to_str(record["data"])
            if "Data" in record:
                record["Data"] = to_str(record["Data"])
        event = {"records": records}
        event = to_bytes(json.dumps(event))
        response = client.invoke(FunctionName=lambda_arn, Payload=event)
        result = response.get("Payload").read()
        result = json.loads(to_str(result))
        records = result.get("records", []) if result else []
    else:
        LOG.warning("Unsupported Firehose processor type '%s'", proc_type)
    return records

def _put_records_to_s3_bucket(
    self,
    stream_name: str,
    records: List[Dict],
    s3_destination_description: S3DestinationDescription,
):
    bucket = s3_bucket_name(s3_destination_description["BucketARN"])
    prefix = s3_destination_description.get("Prefix", "")
    s3 = connect_to_resource("s3")
    batched_data = b"".join([base64.b64decode(r.get("Data") or r.get("data")) for r in records])
    obj_path = self._get_s3_object_path(stream_name, prefix)
    try:
        LOG.debug("Publishing to S3 destination: %s. Data: %s", bucket, batched_data)
        s3.Object(bucket, obj_path).put(Body=batched_data)
    except Exception as e:
        ...


