How to use the _add_missing_record_attributes method in localstack

Best Python code snippet using localstack_python

provider.py

Source: provider.py (GitHub)


...402 f"Firehose {delivery_stream_name} under account {TEST_AWS_ACCOUNT_ID} "403 f"not found."404 )405 # preprocess records, add any missing attributes406 self._add_missing_record_attributes(unprocessed_records)407 for destination in delivery_stream_description.get("Destinations", []):408 # apply processing steps to incoming items409 proc_config = {}410 for child in destination.values():411 proc_config = (412 isinstance(child, dict) and child.get("ProcessingConfiguration") or proc_config413 )414 records = list(unprocessed_records)415 if proc_config.get("Enabled") is not False:416 for processor in proc_config.get("Processors", []):417 # TODO: run processors asynchronously, to avoid request timeouts on PutRecord API calls418 records = self._preprocess_records(processor, records)419 if "ElasticsearchDestinationDescription" in destination:420 self._put_to_search_db(421 "ElasticSearch",422 destination["ElasticsearchDestinationDescription"],423 delivery_stream_name,424 records,425 unprocessed_records,426 )427 if "AmazonopensearchserviceDestinationDescription" in destination:428 self._put_to_search_db(429 "OpenSearch",430 destination["AmazonopensearchserviceDestinationDescription"],431 delivery_stream_name,432 records,433 unprocessed_records,434 )435 if "S3DestinationDescription" in destination:436 s3_dest_desc = destination["S3DestinationDescription"]437 self._put_records_to_s3_bucket(delivery_stream_name, records, s3_dest_desc)438 if "HttpEndpointDestinationDescription" in destination:439 http_dest = destination["HttpEndpointDestinationDescription"]440 end_point = http_dest["EndpointConfiguration"]441 url = end_point["Url"]442 record_to_send = {443 "requestId": str(uuid.uuid4()),444 "timestamp": (int(time.time())),445 "records": [],446 }447 for record in records:448 data = record.get("Data") or record.get("data")449 record_to_send["records"].append({"data": to_str(data)})450 headers = {451 "Content-Type": "application/json",452 }453 try:454 requests.post(url, json=record_to_send, headers=headers)455 except Exception as e:456 LOG.exception(f"Unable to put Firehose records to HTTP endpoint {url}.")457 raise e458 return [459 PutRecordBatchResponseEntry(RecordId=str(uuid.uuid4())) for _ in unprocessed_records460 ]461 def _put_to_search_db(462 self, db_flavor, db_description, delivery_stream_name, records, unprocessed_records463 ):464 """465 sends Firehose records to an ElasticSearch or Opensearch database466 """467 search_db_index = db_description["IndexName"]468 search_db_type = db_description.get("TypeName")469 region = aws_stack.get_region()470 domain_arn = db_description.get("DomainARN")471 cluster_endpoint = db_description.get("ClusterEndpoint")472 if cluster_endpoint is None:473 cluster_endpoint = aws_stack.get_opensearch_endpoint(domain_arn)474 db_connection = get_search_db_connection(cluster_endpoint, region)475 if db_description.get("S3BackupMode") == ElasticsearchS3BackupMode.AllDocuments:476 s3_dest_desc = db_description.get("S3DestinationDescription")477 if s3_dest_desc:478 try:479 self._put_records_to_s3_bucket(480 stream_name=delivery_stream_name,481 records=unprocessed_records,482 s3_destination_description=s3_dest_desc,483 )484 except Exception as e:485 LOG.warning("Unable to backup unprocessed records to S3. Error: %s", e)486 else:487 LOG.warning("Passed S3BackupMode without S3Configuration. 
Cannot backup...")488 elif db_description.get("S3BackupMode") == ElasticsearchS3BackupMode.FailedDocumentsOnly:489 # TODO support FailedDocumentsOnly as well490 LOG.warning("S3BackupMode FailedDocumentsOnly is set but currently not supported.")491 for record in records:492 obj_id = uuid.uuid4()493 data = "{}"494 # DirectPut495 if "Data" in record:496 data = base64.b64decode(record["Data"])497 # KinesisAsSource498 elif "data" in record:499 data = base64.b64decode(record["data"])500 try:501 body = json.loads(data)502 except Exception as e:503 LOG.warning(f"{db_flavor} only allows json input data!")504 raise e505 LOG.debug(506 "Publishing to {} destination. Data: {}".format(507 db_flavor, truncate(data, max_length=300)508 )509 )510 try:511 db_connection.create(512 index=search_db_index, doc_type=search_db_type, id=obj_id, body=body513 )514 except Exception as e:515 LOG.exception(f"Unable to put record to stream {delivery_stream_name}.")516 raise e517 def _add_missing_record_attributes(self, records: List[Dict]) -> None:518 def _get_entry(obj, key):519 return obj.get(key) or obj.get(first_char_to_lower(key))520 for record in records:521 if not _get_entry(record, "ApproximateArrivalTimestamp"):522 record["ApproximateArrivalTimestamp"] = int(now_utc(millis=True))523 if not _get_entry(record, "KinesisRecordMetadata"):524 record["kinesisRecordMetadata"] = {525 "shardId": "shardId-000000000000",526 # not really documented what AWS is using internally - simply using a random UUID here527 "partitionKey": str(uuid.uuid4()),528 "approximateArrivalTimestamp": timestamp(529 float(_get_entry(record, "ApproximateArrivalTimestamp")) / 1000,530 format=TIMESTAMP_FORMAT_MICROS,531 ),...
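
The _add_missing_record_attributes method normalizes each incoming record before it is dispatched to any destination: it tolerates both PascalCase keys (DirectPut) and camelCase keys (Kinesis as source), backfills ApproximateArrivalTimestamp with the current UTC time in milliseconds, and attaches a synthetic kinesisRecordMetadata entry. The sketch below re-implements that behavior using only the standard library so it can run outside LocalStack; first_char_to_lower is a simplified stand-in for LocalStack's helper of the same name, and the timestamp formatting only approximates TIMESTAMP_FORMAT_MICROS (it omits the microsecond suffix).

import time
import uuid
from typing import Dict, List


def first_char_to_lower(key: str) -> str:
    # simplified stand-in for LocalStack's first_char_to_lower helper
    return key[:1].lower() + key[1:]


def add_missing_record_attributes(records: List[Dict]) -> None:
    # standalone sketch of FirehoseProvider._add_missing_record_attributes
    def _get_entry(obj, key):
        # records may carry PascalCase keys (DirectPut) or camelCase keys (KinesisAsSource)
        return obj.get(key) or obj.get(first_char_to_lower(key))

    for record in records:
        if not _get_entry(record, "ApproximateArrivalTimestamp"):
            # current UTC time in milliseconds, like LocalStack's now_utc(millis=True)
            record["ApproximateArrivalTimestamp"] = int(time.time() * 1000)
        if not _get_entry(record, "KinesisRecordMetadata"):
            arrival = float(_get_entry(record, "ApproximateArrivalTimestamp")) / 1000
            record["kinesisRecordMetadata"] = {
                "shardId": "shardId-000000000000",
                # AWS does not document its internal value; a random UUID stands in
                "partitionKey": str(uuid.uuid4()),
                # coarse equivalent of timestamp(..., format=TIMESTAMP_FORMAT_MICROS)
                "approximateArrivalTimestamp": time.strftime(
                    "%Y-%m-%dT%H:%M:%S", time.gmtime(arrival)
                ),
            }


records = [{"Data": "eyJpZCI6IDF9"}]  # base64 for '{"id": 1}'
add_missing_record_attributes(records)
print(records[0]["ApproximateArrivalTimestamp"])       # e.g. 1700000000000
print(records[0]["kinesisRecordMetadata"]["shardId"])  # shardId-000000000000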
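To exercise this code path end to end, the minimal sketch below creates a delivery stream with an S3 destination and puts a single record; every record submitted this way passes through _add_missing_record_attributes before preprocessing and delivery. It assumes LocalStack is reachable on localhost:4566 with dummy credentials; the bucket name, stream name, and role ARN are illustrative placeholders (LocalStack does not validate the role).

import json

import boto3

# assumption: LocalStack is running locally with the S3 and Firehose services enabled
common = dict(
    endpoint_url="http://localhost:4566",
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)
s3 = boto3.client("s3", **common)
firehose = boto3.client("firehose", **common)

s3.create_bucket(Bucket="my-firehose-bucket")
firehose.create_delivery_stream(
    DeliveryStreamName="my-stream",
    S3DestinationConfiguration={
        "RoleARN": "arn:aws:iam::000000000000:role/firehose-role",  # placeholder
        "BucketARN": "arn:aws:s3:::my-firehose-bucket",
    },
)

# PutRecord routes through the provider shown above: the record is normalized by
# _add_missing_record_attributes, then delivered to the S3 destination
firehose.put_record(
    DeliveryStreamName="my-stream",
    Record={"Data": json.dumps({"id": 1}).encode()},
)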


