Best Python code snippet using localstack_python
provider.py
Source:provider.py  
...
            records = list(unprocessed_records)
            if proc_config.get("Enabled") is not False:
                for processor in proc_config.get("Processors", []):
                    # TODO: run processors asynchronously, to avoid request timeouts on PutRecord API calls
                    records = self._preprocess_records(processor, records)
            if "ElasticsearchDestinationDescription" in destination:
                self._put_to_search_db(
                    "ElasticSearch",
                    destination["ElasticsearchDestinationDescription"],
                    delivery_stream_name,
                    records,
                    unprocessed_records,
                )
            if "AmazonopensearchserviceDestinationDescription" in destination:
                self._put_to_search_db(
                    "OpenSearch",
                    destination["AmazonopensearchserviceDestinationDescription"],
                    delivery_stream_name,
                    records,
                    unprocessed_records,
                )
            if "S3DestinationDescription" in destination:
                s3_dest_desc = destination["S3DestinationDescription"]
                self._put_records_to_s3_bucket(delivery_stream_name, records, s3_dest_desc)
            if "HttpEndpointDestinationDescription" in destination:
                http_dest = destination["HttpEndpointDestinationDescription"]
                end_point = http_dest["EndpointConfiguration"]
                url = end_point["Url"]
                record_to_send = {
                    "requestId": str(uuid.uuid4()),
                    "timestamp": (int(time.time())),
                    "records": [],
                }
                for record in records:
                    data = record.get("Data") or record.get("data")
                    record_to_send["records"].append({"data": to_str(data)})
                headers = {
                    "Content-Type": "application/json",
                }
                try:
                    requests.post(url, json=record_to_send, headers=headers)
                except Exception as e:
                    LOG.exception(f"Unable to put Firehose records to HTTP endpoint {url}.")
                    raise e
        return [
            PutRecordBatchResponseEntry(RecordId=str(uuid.uuid4())) for _ in unprocessed_records
        ]

    def _put_to_search_db(
        self, db_flavor, db_description, delivery_stream_name, records, unprocessed_records
    ):
        """
        Sends Firehose records to an Elasticsearch or OpenSearch database.
        """
        search_db_index = db_description["IndexName"]
        search_db_type = db_description.get("TypeName")
        region = aws_stack.get_region()
        domain_arn = db_description.get("DomainARN")
        cluster_endpoint = db_description.get("ClusterEndpoint")
        if cluster_endpoint is None:
            cluster_endpoint = aws_stack.get_opensearch_endpoint(domain_arn)
        db_connection = get_search_db_connection(cluster_endpoint, region)
        if db_description.get("S3BackupMode") == ElasticsearchS3BackupMode.AllDocuments:
            s3_dest_desc = db_description.get("S3DestinationDescription")
            if s3_dest_desc:
                try:
                    self._put_records_to_s3_bucket(
                        stream_name=delivery_stream_name,
                        records=unprocessed_records,
                        s3_destination_description=s3_dest_desc,
                    )
                except Exception as e:
                    LOG.warning("Unable to backup unprocessed records to S3. Error: %s", e)
            else:
                LOG.warning("Passed S3BackupMode without S3Configuration. Cannot backup...")
        elif db_description.get("S3BackupMode") == ElasticsearchS3BackupMode.FailedDocumentsOnly:
            # TODO support FailedDocumentsOnly as well
            LOG.warning("S3BackupMode FailedDocumentsOnly is set but currently not supported.")
        for record in records:
            obj_id = uuid.uuid4()
            data = "{}"
            # DirectPut
            if "Data" in record:
                data = base64.b64decode(record["Data"])
            # KinesisAsSource
            elif "data" in record:
                data = base64.b64decode(record["data"])
            try:
                body = json.loads(data)
            except Exception as e:
                LOG.warning(f"{db_flavor} only allows json input data!")
                raise e
            LOG.debug(
                "Publishing to {} destination. Data: {}".format(
                    db_flavor, truncate(data, max_length=300)
                )
            )
            try:
                db_connection.create(
                    index=search_db_index, doc_type=search_db_type, id=obj_id, body=body
                )
            except Exception as e:
                LOG.exception(f"Unable to put record to stream {delivery_stream_name}.")
                raise e

    def _add_missing_record_attributes(self, records: List[Dict]) -> None:
        def _get_entry(obj, key):
            return obj.get(key) or obj.get(first_char_to_lower(key))

        for record in records:
            if not _get_entry(record, "ApproximateArrivalTimestamp"):
                record["ApproximateArrivalTimestamp"] = int(now_utc(millis=True))
            if not _get_entry(record, "KinesisRecordMetadata"):
                record["kinesisRecordMetadata"] = {
                    "shardId": "shardId-000000000000",
                    # not really documented what AWS is using internally - simply using a random UUID here
                    "partitionKey": str(uuid.uuid4()),
                    "approximateArrivalTimestamp": timestamp(
                        float(_get_entry(record, "ApproximateArrivalTimestamp")) / 1000,
                        format=TIMESTAMP_FORMAT_MICROS,
                    ),
                    "sequenceNumber": next_sequence_number(),
                    "subsequenceNumber": "",
                }

    def _preprocess_records(self, processor: Dict, records: List[Record]) -> List[Dict]:
        """Preprocess the list of records by calling the given processor (e.g., Lambda function)."""
        proc_type = processor.get("Type")
        parameters = processor.get("Parameters", [])
        parameters = {p["ParameterName"]: p["ParameterValue"] for p in parameters}
        if proc_type == "Lambda":
            lambda_arn = parameters.get("LambdaArn")
            # TODO: add support for other parameters, e.g., NumberOfRetries, BufferSizeInMBs, BufferIntervalInSeconds, ...
            client = aws_stack.connect_to_service("lambda")
            records = keys_to_lower(records)
            # Convert the record data to string (for json serialization)
            for record in records:
                if "data" in record:
                    record["data"] = to_str(record["data"])
                if "Data" in record:
...
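The _preprocess_records helper above hands each batch to the configured Lambda processor. For context, a Kinesis Data Firehose transformation Lambda receives base64-encoded records and must return one entry per recordId with a result of "Ok", "Dropped", or "ProcessingFailed"; the sketch below illustrates that contract (the handler name and the upper-casing transformation are illustrative, not part of the LocalStack snippet).

import base64

def handler(event, context):
    # Minimal Firehose transformation Lambda: upper-cases each record's payload
    output = []
    for record in event["records"]:
        payload = base64.b64decode(record["data"]).decode("utf-8")
        transformed = payload.upper()
        output.append(
            {
                "recordId": record["recordId"],  # must echo the incoming recordId
                "result": "Ok",  # "Ok", "Dropped", or "ProcessingFailed"
                "data": base64.b64encode(transformed.encode("utf-8")).decode("utf-8"),
            }
        )
    return {"records": output}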
generate.py
Source:generate.py  
...
            image_string, channels=self.before_shape[-1])
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        image = self.augment(image, ensure_shape='stretch')
        return image, name

    def _preprocess_records(self, serialized):
        # deserialize and preprocess the image
        buffer, label, bbox, count, bbox_label, text = \
            self._parse_proto(serialized)
        # decode jpeg image
        channels = self.before_shape[-1]
        image = self._decode_jpeg(buffer, channels)
        # augment image
        augment_bbox = tf.expand_dims(bbox[:count], 0)
        augment = Augment(image, augment_bbox, self.after_shape, self.moment)
        actions = self._actions(self.mode) + self._actions('final_cpu')
        image = augment.augment(actions)
        values = [image]
        truth_map = {
            'class/label': label,
...
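_decode_jpeg and _parse_proto are defined elsewhere in generate.py; assuming the former wraps TensorFlow's standard JPEG decoding, a minimal stand-alone sketch of that step (the function name and default channel count here are assumptions, not taken from the snippet) could look like:

import tensorflow as tf

def decode_jpeg(image_string, channels=3):
    # Decode raw JPEG bytes into a uint8 tensor with the requested number of channels
    image = tf.io.decode_jpeg(image_string, channels=channels)
    # Scale to float32 in [0, 1], matching the convert_image_dtype call in the snippet
    return tf.image.convert_image_dtype(image, dtype=tf.float32)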
main.py
Source:main.py
...
        :return: A list of records
        :rtype: list
        """
        return list(response.values())[0] if response else []

    def _preprocess_records(self, records):
        """Preprocesses the records for easy writing to the db

        :param records: Records from the Kafka response
        :type records: list
        :return: A list of tuples
        :rtype: list(tuple)
        """
        preprocessed = []
        for r in records:
            v = r.value
            preprocessed.append(
                (v['site'],
                 v['status'],
                 v['latency'],
                 v['regex_found'],
                 datetime.datetime.fromtimestamp(r.timestamp / 1000))
            )
        return preprocessed

    def run(self):
        """Run a loop which polls the Kafka topic for new messages,
        then preprocesses them and stores them to Postgres."""
        dbclient().create_measurements_table()
        print("Initialized successfully!")
        while True:
            response = self.consumer.poll()
            records = self._record_from_response(response)
            preprocessed_records = self._preprocess_records(records)
            if preprocessed_records != []:
                dbclient().batch_insert_measurements(preprocessed_records)
            time.sleep(self.poll_interval)

if __name__ == "__main__":
    ktdb = KafkaToDB()
...
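dbclient is defined elsewhere in this project; assuming a Postgres backend, a hypothetical batch_insert_measurements for the (site, status, latency, regex_found, timestamp) tuples produced above might use psycopg2's executemany. Table, column, and DSN values below are placeholders, not taken from the snippet.

import psycopg2

def batch_insert_measurements(rows, dsn="dbname=monitoring user=postgres"):
    # Insert one row per preprocessed tuple; the connection with-block commits on success
    query = (
        "INSERT INTO measurements (site, status, latency, regex_found, measured_at) "
        "VALUES (%s, %s, %s, %s, %s)"
    )
    with psycopg2.connect(dsn) as conn:
        with conn.cursor() as cur:
            cur.executemany(query, rows)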
