How to use the s3_client_for_region fixture in LocalStack

Best Python code snippet using localstack_python

test_s3.py

Source: test_s3.py (GitHub). The snippet below is taken from LocalStack's S3 integration test suite: it defines the s3_client_for_region pytest fixture (a factory that returns an S3 client pinned to a given region), related fixtures such as s3_create_bucket_with_client, and the tests that exercise them.


...
if TYPE_CHECKING:
    from mypy_boto3_s3 import S3Client

LOG = logging.getLogger(__name__)


@pytest.fixture(scope="class")
def s3_client_for_region():
    def _s3_client(
        region_name: str = None,
    ):
        return _client("s3", region_name=region_name)

    return _s3_client


@pytest.fixture
def s3_create_bucket_with_client(s3_resource):
    buckets = []

    def factory(s3_client, **kwargs) -> str:
        if "Bucket" not in kwargs:
            kwargs["Bucket"] = f"test-bucket-{short_uid()}"
        response = s3_client.create_bucket(**kwargs)
        buckets.append(kwargs["Bucket"])
        return response

    yield factory

    # cleanup
    for bucket in buckets:
        try:
            bucket = s3_resource.Bucket(bucket)
            bucket.objects.all().delete()
            bucket.object_versions.all().delete()
            bucket.delete()
        except Exception as e:
            LOG.debug(f"error cleaning up bucket {bucket}: {e}")


@pytest.fixture
def s3_multipart_upload(s3_client):
    def perform_multipart_upload(bucket, key, data=None, zipped=False, acl=None):
        kwargs = {"ACL": acl} if acl else {}
        multipart_upload_dict = s3_client.create_multipart_upload(Bucket=bucket, Key=key, **kwargs)
        upload_id = multipart_upload_dict["UploadId"]

        # Write contents to memory rather than a file.
        data = data or (5 * short_uid())
        data = to_bytes(data)
        upload_file_object = BytesIO(data)
        if zipped:
            upload_file_object = BytesIO()
            with gzip.GzipFile(fileobj=upload_file_object, mode="w") as filestream:
                filestream.write(data)

        response = s3_client.upload_part(
            Bucket=bucket,
            Key=key,
            Body=upload_file_object,
            PartNumber=1,
            UploadId=upload_id,
        )

        multipart_upload_parts = [{"ETag": response["ETag"], "PartNumber": 1}]

        return s3_client.complete_multipart_upload(
            Bucket=bucket,
            Key=key,
            MultipartUpload={"Parts": multipart_upload_parts},
            UploadId=upload_id,
        )

    return perform_multipart_upload


@pytest.fixture
def create_tmp_folder_lambda():
    cleanup_folders = []

    def prepare_folder(path_to_lambda, run_command=None):
        tmp_dir = tempfile.mkdtemp()
        shutil.copy(path_to_lambda, tmp_dir)
        if run_command:
            run(f"cd {tmp_dir}; {run_command}")
        cleanup_folders.append(tmp_dir)
        return tmp_dir

    yield prepare_folder

    for folder in cleanup_folders:
        try:
            shutil.rmtree(folder)
        except Exception:
            LOG.warning(f"could not delete folder {folder}")


class TestS3:
    @pytest.mark.aws_validated
    @pytest.mark.skip_snapshot_verify(paths=["$..EncodingType"])
    def test_region_header_exists(self, s3_client, s3_create_bucket, snapshot):
        snapshot.add_transformer(snapshot.transform.s3_api())
        bucket_name = s3_create_bucket(
            CreateBucketConfiguration={"LocationConstraint": "eu-west-1"},
        )
        response = s3_client.head_bucket(Bucket=bucket_name)
        assert response["ResponseMetadata"]["HTTPHeaders"]["x-amz-bucket-region"] == "eu-west-1"
        snapshot.match("head_bucket", response)
        response = s3_client.list_objects_v2(Bucket=bucket_name)
        assert response["ResponseMetadata"]["HTTPHeaders"]["x-amz-bucket-region"] == "eu-west-1"
        snapshot.match("list_objects_v2", response)

    @pytest.mark.aws_validated
    # TODO list-buckets contains other buckets when running in CI
    @pytest.mark.skip_snapshot_verify(
        paths=["$..Marker", "$..Prefix", "$..EncodingType", "$..list-buckets.Buckets"]
    )
    def test_delete_bucket_with_content(self, s3_client, s3_resource, s3_bucket, snapshot):
        snapshot.add_transformer(snapshot.transform.s3_api())
        bucket_name = s3_bucket

        for i in range(0, 10, 1):
            body = "test-" + str(i)
            key = "test-key-" + str(i)
            s3_client.put_object(Bucket=bucket_name, Key=key, Body=body)

        resp = s3_client.list_objects(Bucket=bucket_name, MaxKeys=100)
        snapshot.match("list-objects", resp)
        assert 10 == len(resp["Contents"])

        bucket = s3_resource.Bucket(bucket_name)
        bucket.objects.all().delete()
        bucket.delete()

        resp = s3_client.list_buckets()
        # TODO - this fails in the CI pipeline and is currently skipped from verification
        snapshot.match("list-buckets", resp)
        assert bucket_name not in [b["Name"] for b in resp["Buckets"]]

    @pytest.mark.aws_validated
    @pytest.mark.skip_snapshot_verify(paths=["$..VersionId", "$..ContentLanguage"])
    def test_put_and_get_object_with_utf8_key(self, s3_client, s3_bucket, snapshot):
        snapshot.add_transformer(snapshot.transform.s3_api())

        response = s3_client.put_object(Bucket=s3_bucket, Key="Ā0Ä", Body=b"abc123")
        assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
        snapshot.match("put-object", response)

        response = s3_client.get_object(Bucket=s3_bucket, Key="Ā0Ä")
        snapshot.match("get-object", response)
        assert response["Body"].read() == b"abc123"

    @pytest.mark.aws_validated
    def test_resource_object_with_slashes_in_key(self, s3_resource, s3_bucket):
        s3_resource.Object(s3_bucket, "/foo").put(Body="foobar")
        s3_resource.Object(s3_bucket, "bar").put(Body="barfoo")

        with pytest.raises(ClientError) as e:
            s3_resource.Object(s3_bucket, "foo").get()
        e.match("NoSuchKey")

        with pytest.raises(ClientError) as e:
            s3_resource.Object(s3_bucket, "/bar").get()
        e.match("NoSuchKey")

        response = s3_resource.Object(s3_bucket, "/foo").get()
        assert response["Body"].read() == b"foobar"
        response = s3_resource.Object(s3_bucket, "bar").get()
        assert response["Body"].read() == b"barfoo"

    @pytest.mark.aws_validated
    def test_metadata_header_character_decoding(self, s3_client, s3_bucket, snapshot):
        snapshot.add_transformer(snapshot.transform.s3_api())
        # Object metadata keys should accept keys with underscores
        # https://github.com/localstack/localstack/issues/1790
        # put object
        object_key = "key-with-metadata"
        metadata = {"TEST_META_1": "foo", "__meta_2": "bar"}
        s3_client.put_object(Bucket=s3_bucket, Key=object_key, Metadata=metadata, Body="foo")
        metadata_saved = s3_client.head_object(Bucket=s3_bucket, Key=object_key)["Metadata"]
        snapshot.match("head-object", metadata_saved)

        # note that casing is removed (since headers are case-insensitive)
        assert metadata_saved == {"test_meta_1": "foo", "__meta_2": "bar"}

    @pytest.mark.aws_validated
    @pytest.mark.skip_snapshot_verify(paths=["$..VersionId", "$..ContentLanguage"])
    def test_upload_file_multipart(self, s3_client, s3_bucket, tmpdir, snapshot):
        snapshot.add_transformer(snapshot.transform.s3_api())
        key = "my-key"
        # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3.html#multipart-transfers
        transfer_config = TransferConfig(multipart_threshold=5 * KB, multipart_chunksize=1 * KB)

        file = tmpdir / "test-file.bin"
        data = b"1" * (6 * KB)  # create 6 kilobytes of ones
        file.write(data=data, mode="w")

        s3_client.upload_file(
            Bucket=s3_bucket, Key=key, Filename=str(file.realpath()), Config=transfer_config
        )

        obj = s3_client.get_object(Bucket=s3_bucket, Key=key)
        assert obj["Body"].read() == data, f"body did not contain expected data {obj}"
        snapshot.match("get_object", obj)

    @pytest.mark.aws_validated
    @pytest.mark.parametrize("delimiter", ["/", "%2F"])
    def test_list_objects_with_prefix(self, s3_client, s3_create_bucket, delimiter):
        bucket_name = s3_create_bucket()
        key = "test/foo/bar/123"
        s3_client.put_object(Bucket=bucket_name, Key=key, Body=b"content 123")

        response = s3_client.list_objects(
            Bucket=bucket_name, Prefix="test/", Delimiter=delimiter, MaxKeys=1, EncodingType="url"
        )
        sub_dict = {
            "Delimiter": delimiter,
            "EncodingType": "url",
            "IsTruncated": False,
            "Marker": "",
            "MaxKeys": 1,
            "Name": bucket_name,
            "Prefix": "test/",
        }

        if delimiter == "/":
            # if delimiter is "/", then common prefixes are returned
            sub_dict["CommonPrefixes"] = [{"Prefix": "test/foo/"}]
        else:
            # if delimiter is "%2F" (or other non-contained character), then the actual keys are returned in Contents
            assert len(response["Contents"]) == 1
            assert response["Contents"][0]["Key"] == key
            sub_dict["Delimiter"] = "%252F"

        assert is_sub_dict(sub_dict, response)

    @pytest.mark.aws_validated
    @pytest.mark.skip_snapshot_verify(path="$..Error.BucketName")
    def test_get_object_no_such_bucket(self, s3_client, snapshot):
        with pytest.raises(ClientError) as e:
            s3_client.get_object(Bucket=f"does-not-exist-{short_uid()}", Key="foobar")
        snapshot.match("expected_error", e.value.response)

    @pytest.mark.aws_validated
    @pytest.mark.skip_snapshot_verify(path="$..RequestID")
    def test_delete_bucket_no_such_bucket(self, s3_client, snapshot):
        with pytest.raises(ClientError) as e:
            s3_client.delete_bucket(Bucket=f"does-not-exist-{short_uid()}")
        snapshot.match("expected_error", e.value.response)

    @pytest.mark.aws_validated
    @pytest.mark.skip_snapshot_verify(path="$..Error.BucketName")
    def test_get_bucket_notification_configuration_no_such_bucket(self, s3_client, snapshot):
        with pytest.raises(ClientError) as e:
            s3_client.get_bucket_notification_configuration(Bucket=f"doesnotexist-{short_uid()}")
        snapshot.match("expected_error", e.value.response)

    @pytest.mark.aws_validated
    @pytest.mark.xfail(
        reason="currently not implemented in moto, see https://github.com/localstack/localstack/issues/6217"
    )
    # TODO: see also XML issue in https://github.com/localstack/localstack/issues/6422
    def test_get_object_attributes(self, s3_client, s3_bucket, snapshot):
        s3_client.put_object(Bucket=s3_bucket, Key="data.txt", Body=b"69\n420\n")
        response = s3_client.get_object_attributes(
            Bucket=s3_bucket,
            Key="data.txt",
            ObjectAttributes=["StorageClass", "ETag", "ObjectSize"],
        )
        snapshot.match("object-attrs", response)

    @pytest.mark.aws_validated
    @pytest.mark.skip_snapshot_verify(paths=["$..VersionId", "$..ContentLanguage"])
    def test_put_and_get_object_with_hash_prefix(self, s3_client, s3_bucket, snapshot):
        snapshot.add_transformer(snapshot.transform.s3_api())
        key_name = "#key-with-hash-prefix"
        content = b"test 123"

        response = s3_client.put_object(Bucket=s3_bucket, Key=key_name, Body=content)
        assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
        snapshot.match("put-object", response)

        response = s3_client.get_object(Bucket=s3_bucket, Key=key_name)
        snapshot.match("get-object", response)
        assert response["Body"].read() == content

    @pytest.mark.aws_validated
    @pytest.mark.xfail(reason="error message is different in current implementation")
    def test_invalid_range_error(self, s3_client, s3_bucket):
        key = "my-key"
        s3_client.put_object(Bucket=s3_bucket, Key=key, Body=b"abcdefgh")

        with pytest.raises(ClientError) as e:
            s3_client.get_object(Bucket=s3_bucket, Key=key, Range="bytes=1024-4096")
        e.match("InvalidRange")
        e.match("The requested range is not satisfiable")

    @pytest.mark.aws_validated
    def test_range_key_not_exists(self, s3_client, s3_bucket):
        key = "my-key"
        with pytest.raises(ClientError) as e:
            s3_client.get_object(Bucket=s3_bucket, Key=key, Range="bytes=1024-4096")
        e.match("NoSuchKey")
        e.match("The specified key does not exist.")

    @pytest.mark.aws_validated
    def test_create_bucket_via_host_name(self, s3_vhost_client):
        # TODO check redirection (happens in AWS because of region name), should it happen in LS?
        # https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#VirtualHostingBackwardsCompatibility
        bucket_name = f"test-{short_uid()}"
        try:
            response = s3_vhost_client.create_bucket(
                Bucket=bucket_name,
                CreateBucketConfiguration={"LocationConstraint": "eu-central-1"},
            )
            assert "Location" in response
            assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
            response = s3_vhost_client.get_bucket_location(Bucket=bucket_name)
            assert response["ResponseMetadata"]["HTTPStatusCode"] == 200
            assert response["LocationConstraint"] == "eu-central-1"
        finally:
            s3_vhost_client.delete_bucket(Bucket=bucket_name)

    @pytest.mark.aws_validated
    def test_put_and_get_bucket_policy(self, s3_client, s3_bucket):
        # put bucket policy
        policy = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Action": "s3:GetObject",
                    "Effect": "Allow",
                    "Resource": f"arn:aws:s3:::{s3_bucket}/*",
                    "Principal": {"AWS": "*"},
                }
            ],
        }
        response = s3_client.put_bucket_policy(Bucket=s3_bucket, Policy=json.dumps(policy))
        assert response["ResponseMetadata"]["HTTPStatusCode"] == 204

        # retrieve and check policy config
        saved_policy = s3_client.get_bucket_policy(Bucket=s3_bucket)["Policy"]
        assert policy == json.loads(saved_policy)

    @pytest.mark.aws_validated
    @pytest.mark.xfail(reason="see https://github.com/localstack/localstack/issues/5769")
    def test_put_object_tagging_empty_list(self, s3_client, s3_bucket, snapshot):
        key = "my-key"
        s3_client.put_object(Bucket=s3_bucket, Key=key, Body=b"abcdefgh")

        object_tags = s3_client.get_object_tagging(Bucket=s3_bucket, Key=key)
        snapshot.match("created-object-tags", object_tags)

        tag_set = {"TagSet": [{"Key": "tag1", "Value": "tag1"}]}
        s3_client.put_object_tagging(Bucket=s3_bucket, Key=key, Tagging=tag_set)

        object_tags = s3_client.get_object_tagging(Bucket=s3_bucket, Key=key)
        snapshot.match("updated-object-tags", object_tags)

        s3_client.put_object_tagging(Bucket=s3_bucket, Key=key, Tagging={"TagSet": []})

        object_tags = s3_client.get_object_tagging(Bucket=s3_bucket, Key=key)
        snapshot.match("deleted-object-tags", object_tags)

    @pytest.mark.aws_validated
    @pytest.mark.xfail(reason="see https://github.com/localstack/localstack/issues/6218")
    def test_head_object_fields(self, s3_client, s3_bucket, snapshot):
        key = "my-key"
        s3_client.put_object(Bucket=s3_bucket, Key=key, Body=b"abcdefgh")
        response = s3_client.head_object(Bucket=s3_bucket, Key=key)
        # missing AcceptRanges field
        # see https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html
        # https://stackoverflow.com/questions/58541696/s3-not-returning-accept-ranges-header
        # https://www.keycdn.com/support/frequently-asked-questions#is-byte-range-not-working-in-combination-with-s3
        snapshot.match("head-object", response)

    @pytest.mark.aws_validated
    @pytest.mark.xfail(reason="see https://github.com/localstack/localstack/issues/6553")
    def test_get_object_after_deleted_in_versioned_bucket(
        self, s3_client, s3_bucket, s3_resource, snapshot
    ):
        bucket = s3_resource.Bucket(s3_bucket)
        bucket.Versioning().enable()
        key = "my-key"
        s3_client.put_object(Bucket=s3_bucket, Key=key, Body=b"abcdefgh")

        s3_obj = s3_client.get_object(Bucket=s3_bucket, Key=key)
        snapshot.match("get-object", s3_obj)

        s3_client.delete_object(Bucket=s3_bucket, Key=key)

        with pytest.raises(ClientError) as e:
            s3_client.get_object(Bucket=s3_bucket, Key=key)
        snapshot.match("get-object-after-delete", e.value.response)

    @pytest.mark.aws_validated
    @pytest.mark.parametrize("algorithm", ["CRC32", "CRC32C", "SHA1", "SHA256"])
    def test_put_object_checksum(self, s3_client, s3_create_bucket, algorithm):
        bucket = s3_create_bucket()
        key = f"file-{short_uid()}"
        data = b"test data.."
        params = {
            "Bucket": bucket,
            "Key": key,
            "Body": data,
            "ChecksumAlgorithm": algorithm,
            f"Checksum{algorithm}": short_uid(),
        }

        with pytest.raises(ClientError) as e:
            s3_client.put_object(**params)
        error = e.value.response["Error"]
        assert error["Code"] == "InvalidRequest"
        checksum_header = f"x-amz-checksum-{algorithm.lower()}"
        assert error["Message"] == f"Value for {checksum_header} header is invalid."

        # Test our generated checksums
        match algorithm:
            case "CRC32":
                checksum = checksum_crc32(data)
            case "CRC32C":
                checksum = checksum_crc32c(data)
            case "SHA1":
                checksum = hash_sha1(data)
            case "SHA256":
                checksum = hash_sha256(data)
            case _:
                checksum = ""
        params.update({f"Checksum{algorithm}": checksum})
        response = s3_client.put_object(**params)
        assert response["ResponseMetadata"]["HTTPStatusCode"] == 200

        # Test the autogenerated checksums
        params.pop(f"Checksum{algorithm}")
        response = s3_client.put_object(**params)
        assert response["ResponseMetadata"]["HTTPStatusCode"] == 200

    @pytest.mark.aws_validated
    @pytest.mark.skip_snapshot_verify(paths=["$..AcceptRanges"])
    def test_s3_copy_metadata_replace(self, s3_client, s3_create_bucket, snapshot):
        snapshot.add_transformer(snapshot.transform.s3_api())

        object_key = "source-object"
        bucket_name = s3_create_bucket()
        resp = s3_client.put_object(
            Bucket=bucket_name,
            Key=object_key,
            Body='{"key": "value"}',
            ContentType="application/json",
            Metadata={"key": "value"},
        )
        snapshot.match("put_object", resp)

        head_object = s3_client.head_object(Bucket=bucket_name, Key=object_key)
        snapshot.match("head_object", head_object)

        object_key_copy = f"{object_key}-copy"
        resp = s3_client.copy_object(
            Bucket=bucket_name,
            CopySource=f"{bucket_name}/{object_key}",
            Key=object_key_copy,
            Metadata={"another-key": "value"},
            ContentType="application/javascript",
            MetadataDirective="REPLACE",
        )
        snapshot.match("copy_object", resp)

        head_object = s3_client.head_object(Bucket=bucket_name, Key=object_key_copy)
        snapshot.match("head_object_copy", head_object)

    @pytest.mark.aws_validated
    @pytest.mark.skip_snapshot_verify(paths=["$..AcceptRanges"])
    def test_s3_copy_content_type_and_metadata(self, s3_client, s3_create_bucket, snapshot):
        snapshot.add_transformer(snapshot.transform.s3_api())
        object_key = "source-object"
        bucket_name = s3_create_bucket()
        resp = s3_client.put_object(
            Bucket=bucket_name,
            Key=object_key,
            Body='{"key": "value"}',
            ContentType="application/json",
            Metadata={"key": "value"},
        )
        snapshot.match("put_object", resp)

        head_object = s3_client.head_object(Bucket=bucket_name, Key=object_key)
        snapshot.match("head_object", head_object)

        object_key_copy = f"{object_key}-copy"
        resp = s3_client.copy_object(
            Bucket=bucket_name, CopySource=f"{bucket_name}/{object_key}", Key=object_key_copy
        )
        snapshot.match("copy_object", resp)

        head_object = s3_client.head_object(Bucket=bucket_name, Key=object_key_copy)
        snapshot.match("head_object_copy", head_object)

        s3_client.delete_objects(Bucket=bucket_name, Delete={"Objects": [{"Key": object_key_copy}]})

        # does not set MetadataDirective=REPLACE, so the original metadata should be kept
        object_key_copy = f"{object_key}-second-copy"
        resp = s3_client.copy_object(
            Bucket=bucket_name,
            CopySource=f"{bucket_name}/{object_key}",
            Key=object_key_copy,
            Metadata={"another-key": "value"},
            ContentType="application/javascript",
        )
        snapshot.match("copy_object_second", resp)

        head_object = s3_client.head_object(Bucket=bucket_name, Key=object_key_copy)
        snapshot.match("head_object_second_copy", head_object)

    @pytest.mark.aws_validated
    @pytest.mark.xfail(
        reason="wrong behaviour, see https://docs.aws.amazon.com/AmazonS3/latest/userguide/managing-acls.html"
    )
    def test_s3_multipart_upload_acls(
        self, s3_client, s3_create_bucket, s3_multipart_upload, snapshot
    ):
        # The basis for this test is wrong - see:
        # https://docs.aws.amazon.com/AmazonS3/latest/userguide/managing-acls.html
        # > Bucket and object permissions are independent of each other. An object does not inherit the permissions
        # > from its bucket. For example, if you create a bucket and grant write access to a user, you can't access
        # > that user’s objects unless the user explicitly grants you access.
        snapshot.add_transformer(
            [
                snapshot.transform.key_value("DisplayName"),
                snapshot.transform.key_value("ID", value_replacement="owner-id"),
            ]
        )
        bucket_name = f"test-bucket-{short_uid()}"
        s3_create_bucket(Bucket=bucket_name, ACL="public-read")
        response = s3_client.get_bucket_acl(Bucket=bucket_name)
        snapshot.match("bucket-acl", response)

        def check_permissions(key):
            acl_response = s3_client.get_object_acl(Bucket=bucket_name, Key=key)
            snapshot.match(f"permission-{key}", acl_response)

        # perform uploads (multipart and regular) and check ACLs
        s3_client.put_object(Bucket=bucket_name, Key="acl-key0", Body="something")
        check_permissions("acl-key0")
        s3_multipart_upload(bucket=bucket_name, key="acl-key1")
        check_permissions("acl-key1")
        s3_multipart_upload(bucket=bucket_name, key="acl-key2", acl="public-read-write")
        check_permissions("acl-key2")

    @pytest.mark.only_localstack
    @pytest.mark.parametrize("case_sensitive_headers", [True, False])
    def test_s3_get_response_case_sensitive_headers(
        self, s3_client, s3_bucket, case_sensitive_headers
    ):
        # Test that RETURN_CASE_SENSITIVE_HEADERS is respected
        object_key = "key-by-hostname"
        s3_client.put_object(Bucket=s3_bucket, Key=object_key, Body="something")

        # get object and assert headers
        case_sensitive_before = http2_server.RETURN_CASE_SENSITIVE_HEADERS
        try:
            url = s3_client.generate_presigned_url(
                "get_object", Params={"Bucket": s3_bucket, "Key": object_key}
            )
            http2_server.RETURN_CASE_SENSITIVE_HEADERS = case_sensitive_headers
            response = requests.get(url, verify=False)
            # expect that Etag is contained
            header_names = list(response.headers.keys())
            expected_etag = "ETag" if case_sensitive_headers else "etag"
            assert expected_etag in header_names
        finally:
            http2_server.RETURN_CASE_SENSITIVE_HEADERS = case_sensitive_before

    @pytest.mark.aws_validated
    @pytest.mark.skip_snapshot_verify(
        paths=[
            "$..AcceptRanges",
            "$..ContentLanguage",
            "$..VersionId",
            "$..Restore",
        ]
    )
    def test_s3_object_expiry(self, s3_client, s3_bucket, snapshot):
        # AWS only cleans up S3 expired object once a day usually
        # the object stays accessible for quite a while after being expired
        # https://stackoverflow.com/questions/38851456/aws-s3-object-expiration-less-than-24-hours
        # handle s3 object expiry
        # https://github.com/localstack/localstack/issues/1685
        # TODO: should we have a config var to not deleted immediately in the new provider? and schedule it?
        snapshot.add_transformer(snapshot.transform.s3_api())
        # put object
        short_expire = datetime.datetime.now(timezone("GMT")) + datetime.timedelta(seconds=1)
        object_key_expired = "key-object-expired"
        object_key_not_expired = "key-object-not-expired"

        s3_client.put_object(
            Bucket=s3_bucket,
            Key=object_key_expired,
            Body="foo",
            Expires=short_expire,
        )
        # sleep so it expires
        time.sleep(3)
        # head_object does not raise an error for now in LS
        response = s3_client.head_object(Bucket=s3_bucket, Key=object_key_expired)
        assert response["Expires"] < datetime.datetime.now(timezone("GMT"))
        snapshot.match("head-object-expired", response)

        # try to fetch an object which is already expired
        if not is_aws_cloud():  # fixme for now behaviour differs, have a look at it and discuss
            with pytest.raises(Exception) as e:  # this does not raise in AWS
                s3_client.get_object(Bucket=s3_bucket, Key=object_key_expired)
            e.match("NoSuchKey")

        s3_client.put_object(
            Bucket=s3_bucket,
            Key=object_key_not_expired,
            Body="foo",
            Expires=datetime.datetime.now(timezone("GMT")) + datetime.timedelta(hours=1),
        )
        # try to fetch an object that has not expired yet
        resp = s3_client.get_object(Bucket=s3_bucket, Key=object_key_not_expired)
        assert "Expires" in resp
        assert resp["Expires"] > datetime.datetime.now(timezone("GMT"))
        snapshot.match("get-object-not-yet-expired", resp)

    @pytest.mark.aws_validated
    @pytest.mark.skip_snapshot_verify(
        paths=[
            "$..ContentLanguage",
            "$..VersionId",
        ]
    )
    def test_upload_file_with_xml_preamble(self, s3_client, s3_create_bucket, snapshot):
        snapshot.add_transformer(snapshot.transform.s3_api())
        bucket_name = f"bucket-{short_uid()}"
        object_key = f"key-{short_uid()}"
        body = '<?xml version="1.0" encoding="UTF-8"?><test/>'

        s3_create_bucket(Bucket=bucket_name)
        s3_client.put_object(Bucket=bucket_name, Key=object_key, Body=body)

        response = s3_client.get_object(Bucket=bucket_name, Key=object_key)
        snapshot.match("get_object", response)

    @pytest.mark.aws_validated
    @pytest.mark.xfail(reason="The error format is wrong in s3_listener (is_bucket_available)")
    def test_bucket_availability(self, s3_client, snapshot):
        bucket_name = "test-bucket-lifecycle"

        with pytest.raises(ClientError) as e:
            s3_client.get_bucket_lifecycle(Bucket=bucket_name)
        snapshot.match("bucket-lifecycle", e.value.response)

        with pytest.raises(ClientError) as e:
            s3_client.get_bucket_replication(Bucket=bucket_name)
        snapshot.match("bucket-replication", e.value.response)

    @pytest.mark.aws_validated
    def test_location_path_url(self, s3_client, s3_create_bucket, account_id):
        region = "us-east-2"
        bucket_name = s3_create_bucket(
            CreateBucketConfiguration={"LocationConstraint": region}, ACL="public-read"
        )
        response = s3_client.get_bucket_location(Bucket=bucket_name)
        assert region == response["LocationConstraint"]

        url = _bucket_url(bucket_name, region)
        # https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLocation.html
        # make raw request, assert that newline is contained after XML preamble: <?xml ...>\n
        response = requests.get(f"{url}?location?x-amz-expected-bucket-owner={account_id}")
        assert response.ok

        content = to_str(response.content)
        assert re.match(r"^<\?xml [^>]+>\n<.*", content, flags=re.MULTILINE)

    @pytest.mark.aws_validated
    @pytest.mark.skip_snapshot_verify(paths=["$..Error.RequestID"])
    def test_different_location_constraint(
        self,
        s3_client,
        s3_create_bucket,
        s3_client_for_region,
        s3_create_bucket_with_client,
        snapshot,
    ):
        snapshot.add_transformer(snapshot.transform.s3_api())
        snapshot.add_transformer(
            snapshot.transform.key_value("Location", "<location>", reference_replacement=False)
        )
        bucket_1_name = f"bucket-{short_uid()}"
        s3_create_bucket(Bucket=bucket_1_name)
        response = s3_client.get_bucket_location(Bucket=bucket_1_name)
        snapshot.match("get_bucket_location_bucket_1", response)

        region_2 = "us-east-2"
        client_2 = s3_client_for_region(region_name=region_2)

        bucket_2_name = f"bucket-{short_uid()}"
        s3_create_bucket_with_client(
            client_2,
            Bucket=bucket_2_name,
            CreateBucketConfiguration={"LocationConstraint": region_2},
        )
        response = client_2.get_bucket_location(Bucket=bucket_2_name)
        snapshot.match("get_bucket_location_bucket_2", response)

        # assert creation fails without location constraint for us-east-2 region
        with pytest.raises(Exception) as exc:
            client_2.create_bucket(Bucket=f"bucket-{short_uid()}")
        snapshot.match("create_bucket_constraint_exc", exc.value.response)

        bucket_3_name = f"bucket-{short_uid()}"
        response = s3_create_bucket_with_client(...
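
For reference, here is a minimal sketch of how a test can consume the s3_client_for_region fixture together with s3_create_bucket_with_client, mirroring test_different_location_constraint above. It assumes the fixtures are registered via a conftest.py on the test path; the import path for short_uid is also an assumption (it lives in localstack.utils.strings in recent LocalStack versions; adjust for your version).

# Minimal usage sketch (assumes the fixtures above are available via conftest.py).
# The `short_uid` import path is an assumption -- adjust to your LocalStack version.
from localstack.utils.strings import short_uid


class TestRegionalBuckets:
    def test_create_bucket_in_eu_west_1(
        self, s3_client_for_region, s3_create_bucket_with_client
    ):
        # The fixture yields a factory; calling it returns a boto3 S3 client
        # bound to the requested region.
        eu_client = s3_client_for_region(region_name="eu-west-1")

        # Outside us-east-1, S3 requires an explicit LocationConstraint.
        # Passing the region-bound client into s3_create_bucket_with_client
        # creates the bucket in that region and registers it for cleanup.
        bucket_name = f"test-bucket-{short_uid()}"
        s3_create_bucket_with_client(
            eu_client,
            Bucket=bucket_name,
            CreateBucketConfiguration={"LocationConstraint": "eu-west-1"},
        )

        # The bucket reports the region it was created in.
        location = eu_client.get_bucket_location(Bucket=bucket_name)
        assert location["LocationConstraint"] == "eu-west-1"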


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run LocalStack automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

