diff --git a/.gitignore b/.gitignore
index 243430d7..2e7bc650 100644
--- a/.gitignore
+++ b/.gitignore
@@ -95,3 +95,6 @@
 files
 dataset.zip
 **/.terraform/*
+
+# LocalStack default volume directory if override is not provided through environment variables
+/volume
diff --git a/README.md b/README.md
index 466155ed..1c8d3468 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@ pushing changes. You can also configure your IDE to reformat code when you save
 
 ## Security
 
-Running uttu with vanilla security features requires an Oauth2 issuer, which can be set with the following property:
+Running uttu with vanilla security features requires an OAuth2 issuer, which can be set with the following property:
 
 ```properties
 uttu.security.jwt.issuer-uri=https://my-jwt-issuer
@@ -75,35 +75,46 @@ If you don't use Google PubSub, sett this property:
 
 ## Running locally
 
+### Local Environment through Docker Compose
+
+Uttu ships with a [docker-compose.yml](./docker-compose.yml) which contains all the dependent services needed for
+running uttu in various configurations. This environment is assumed to be running whenever the service is run locally
+(see below).
+
+> **Note!** This uses the `docker compose` plugin included with modern versions of Docker, not the separately
+> installable `docker-compose` command.
+
+All Docker Compose commands are resolved against the `docker-compose.yml` file located in the directory in which the
+command is executed.
+
+```shell
+# run with defaults - use ^C to shut down the containers
+docker compose up
+# run with additional profiles, e.g. the LocalStack-based AWS simulator
+docker compose --profile aws up
+# run in background
+docker compose up -d # or --detach
+# shut down the containers
+docker compose down
+# shut down the containers included in a specific profile
+docker compose --profile aws down
+```
+
+See the [Docker Compose reference](https://docs.docker.com/compose/reference/) for more details.
+
 ### Build
 
 To build the project from source, you need Java 21 and Maven 3.
 
 ### Database
 
-#### Via Docker
-
-Install Docker.
+#### Via Docker Compose
 
+Ensure the database is up with:
 ```shell
-docker run \
-  --platform linux/amd64 \
-  --name=uttu \
-  -d \
-  -e POSTGRES_USER=uttu \
-  -e POSTGRES_PASSWORD=uttu \
-  -e POSTGRES_DB=uttu \
-  -p 5432:5432 \
-  -v db_local:/var/lib/postgresql \
-  --restart=always \
-  postgis/postgis:13-3.3
+docker compose up -d
 ```
 
-Now a Docker container is running in the background. Check its status with `docker ps`.
-
-To stop, find its ID from `docker ps`, and run `docker stop theid` (beginning of hash). To restart it, find the ID from
-`docker container list` and run `docker restart theid`.
-
 Run the [database initialization script](./src/main/resources/db_init.sh).
 
 ```shell
@@ -129,14 +140,16 @@ Provider-specific GraphQL endpoint (replace {providerCode} with provider's codes
 
     /services/flexible-lines/{providerCode}/graphql
 
-## Netex Export
+## NeTEx Export
 
 Uttu exports (via provider specific GraphQL API) generated NeTEx file to a blobstore repository.
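+
+As a sketch, a minimal `application.properties` fragment for the `s3-blobstore` implementation described below,
+assuming the bucket created by [init-localstack.sh](./scripts/init-localstack.sh) and the default LocalStack endpoint
+(adjust the values to your environment):
+
+```properties
+spring.profiles.active=s3-blobstore
+blobstore.s3.region=eu-north-1
+blobstore.s3.bucket=local-blobstore-exports
+# only needed when pointing at a local simulator such as LocalStack
+blobstore.s3.endpointOverride=http://localhost:4566
+```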
-Choose one of three implementations with profiles:
+Choose one of the available implementations by activating the matching profile:
 
 - `in-memory-blobstore` - stores exports in memory, exports are lost on restarts, suitable for development and testing
 - `disk-blobstore` - stores exports on disk
 - `gcp-blobstore` - stores exports in Google Cloud Storage, requires additional configuration
+- `s3-blobstore` - stores exports in Amazon Web Services Simple Storage Service (AWS S3), requires additional
+  configuration
 
 Alternatively, provide a
 [`BlobStoreRepository`](https://github.com/entur/rutebanken-helpers/blob/master/storage/src/main/java/org/rutebanken/helper/storage/repository/BlobStoreRepository.java)
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 00000000..2ffc4021
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,46 @@
+name: 'uttu'
+
+services:
+  db:
+    container_name: '${COMPOSE_PROJECT_NAME}_postgis13'
+    image: postgis/postgis:13-3.3
+    platform: linux/amd64
+    restart: always
+    environment:
+      POSTGRES_USER: uttu
+      POSTGRES_PASSWORD: uttu
+      POSTGRES_DB: uttu
+    ports:
+      - "5432:5432"
+    volumes:
+      - postgres-data:/var/lib/postgresql
+    networks:
+      - uttu
+
+  localstack:
+    container_name: "${COMPOSE_PROJECT_NAME}_localstack"
+    profiles: ["aws"]
+    image: localstack/localstack:3.4.0
+    ports:
+      - "4566:4566"           # LocalStack Gateway
+      - "4510-4559:4510-4559" # external services port range
+    environment:
+      - DEBUG=${DEBUG-}
+      - DOCKER_HOST=unix:///var/run/docker.sock
+      - DISABLE_EVENTS=1
+      - SERVICES=s3
+      - AWS_ACCESS_KEY_ID=localstack
+      - AWS_SECRET_ACCESS_KEY=localstack
+      - AWS_DEFAULT_REGION=eu-north-1
+    volumes:
+      - "/var/run/docker.sock:/var/run/docker.sock"
+      - "${LOCALSTACK_VOLUME_DIR:-./volume}:/var/lib/localstack"
+      - "./scripts/init-localstack.sh:/etc/localstack/init/ready.d/init-localstack.sh"
+    networks:
+      - uttu
+
+volumes:
+  postgres-data:
+
+networks:
+  uttu:
diff --git a/pom.xml b/pom.xml
index 60e6d9e2..f11e5673 100644
--- a/pom.xml
+++ b/pom.xml
@@ -62,6 +62,8 @@
     2.1.0
     0.22
     write
+
+    <awssdk.version>2.25.60</awssdk.version>
@@ -83,6 +85,18 @@
 
+  <dependencyManagement>
+    <dependencies>
+      <dependency>
+        <groupId>software.amazon.awssdk</groupId>
+        <artifactId>bom</artifactId>
+        <version>${awssdk.version}</version>
+        <type>pom</type>
+        <scope>import</scope>
+      </dependency>
+    </dependencies>
+  </dependencyManagement>
+
@@ -277,6 +291,26 @@
       test
     </dependency>
 
+    <!-- AWS SDK v2, versions managed through the imported BOM -->
+    <dependency>
+      <groupId>software.amazon.awssdk</groupId>
+      <artifactId>netty-nio-client</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>software.amazon.awssdk</groupId>
+      <artifactId>aws-crt-client</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>software.amazon.awssdk</groupId>
+      <artifactId>s3</artifactId>
+    </dependency>
@@ -317,6 +351,12 @@
     <dependency>
       <groupId>org.testcontainers</groupId>
      <artifactId>gcloud</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.testcontainers</groupId>
+      <artifactId>localstack</artifactId>
+      <version>1.19.8</version>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.assertj</groupId>
       <artifactId>assertj-core</artifactId>
diff --git a/scripts/init-localstack.sh b/scripts/init-localstack.sh
new file mode 100755
index 00000000..aaeb4110
--- /dev/null
+++ b/scripts/init-localstack.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+# Create S3 bucket for blob storage
+awslocal s3api create-bucket --bucket 'local-blobstore-exports' --create-bucket-configuration LocationConstraint=eu-north-1
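Once the `aws` profile is up, the bucket creation can be verified from the host; a sketch assuming the AWS CLI is
installed and the dummy LocalStack credentials from `docker-compose.yml`:

```shell
AWS_ACCESS_KEY_ID=localstack AWS_SECRET_ACCESS_KEY=localstack \
aws --endpoint-url=http://localhost:4566 --region eu-north-1 s3 ls
```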
diff --git a/src/ext-test/java/no/entur/uttu/ext/entur/export/messaging/EnturPubSubMessagingServiceTest.java b/src/ext-test/java/no/entur/uttu/ext/entur/export/messaging/EnturPubSubMessagingServiceTest.java
index c4044066..e8a743e8 100644
--- a/src/ext-test/java/no/entur/uttu/ext/entur/export/messaging/EnturPubSubMessagingServiceTest.java
+++ b/src/ext-test/java/no/entur/uttu/ext/entur/export/messaging/EnturPubSubMessagingServiceTest.java
@@ -26,7 +26,12 @@ import no.entur.uttu.UttuIntegrationTest;
 import no.entur.uttu.export.messaging.spi.MessagingService;
 import org.entur.pubsub.base.EnturGooglePubSubAdmin;
-import org.junit.*;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.beans.factory.annotation.Value;
 import org.springframework.boot.test.context.TestConfiguration;
@@ -39,7 +44,7 @@ import org.testcontainers.utility.DockerImageName;
 
 @Testcontainers
-@ActiveProfiles({ "test", "entur-pubsub-messaging-service" })
+@ActiveProfiles({ "in-memory-blobstore", "entur-pubsub-messaging-service" })
 public class EnturPubSubMessagingServiceTest extends UttuIntegrationTest {
 
   public static final String TEST_CODESPACE = "rut";
diff --git a/src/main/java/no/entur/uttu/export/blob/LocalDiskBlobStoreRepositoryConfig.java b/src/main/java/no/entur/uttu/export/blob/LocalDiskBlobStoreRepositoryConfig.java
index f2089f77..1a072156 100644
--- a/src/main/java/no/entur/uttu/export/blob/LocalDiskBlobStoreRepositoryConfig.java
+++ b/src/main/java/no/entur/uttu/export/blob/LocalDiskBlobStoreRepositoryConfig.java
@@ -24,7 +24,7 @@ import org.springframework.context.annotation.Profile;
 
 @Configuration
-@Profile({ "local", "test", "local-disk-blobstore" })
+@Profile({ "local-disk-blobstore" })
 public class LocalDiskBlobStoreRepositoryConfig {
 
   @Bean
diff --git a/src/main/java/no/entur/uttu/export/blob/S3BlobStoreRepository.java b/src/main/java/no/entur/uttu/export/blob/S3BlobStoreRepository.java
new file mode 100644
index 00000000..b1a09d57
--- /dev/null
+++ b/src/main/java/no/entur/uttu/export/blob/S3BlobStoreRepository.java
@@ -0,0 +1,231 @@
+package no.entur.uttu.export.blob;
+
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.function.Function;
+import org.rutebanken.helper.storage.BlobAlreadyExistsException;
+import org.rutebanken.helper.storage.repository.BlobStoreRepository;
+import software.amazon.awssdk.core.async.AsyncRequestBody;
+import software.amazon.awssdk.core.async.AsyncResponseTransformer;
+import software.amazon.awssdk.core.async.BlockingInputStreamAsyncRequestBody;
+import software.amazon.awssdk.services.s3.S3AsyncClient;
+import software.amazon.awssdk.services.s3.model.GetObjectRequest;
+import software.amazon.awssdk.services.s3.model.ObjectIdentifier;
+import software.amazon.awssdk.services.s3.model.PutObjectResponse;
+import software.amazon.awssdk.services.s3.model.S3Object;
+import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Publisher;
+
+/**
+ * AWS S3 backed implementation of {@link BlobStoreRepository}.
+ */
+public class S3BlobStoreRepository implements BlobStoreRepository {
+
+  /**
+   * All file versions are hardcoded to zero: {@link BlobStoreRepository} is modeled after GCS, which
+   * provides a linear numeric value for object versioning, while S3 uses ETags, which have to be enabled
+   * separately. The two schemes are incompatible, and since the version number is not used in uttu, it
+   * does not matter much that this is hardcoded to always be zero.
+   */
+  private static final long UNKNOWN_LATEST_VERSION = 0;
+
+  private final S3AsyncClient s3AsyncClient;
+
+  private String containerName;
+
+  public S3BlobStoreRepository(S3AsyncClient s3AsyncClient) {
+    this.s3AsyncClient = Objects.requireNonNull(s3AsyncClient);
+  }
+
+  @Override
+  public InputStream getBlob(String objectName) {
+    return s3AsyncClient
+      .getObject(
+        GetObjectRequest.builder().bucket(containerName).key(objectName).build(),
+        AsyncResponseTransformer.toBlockingInputStream()
+      )
+      .join();
+  }
+
+  @Override
+  public long uploadBlob(String objectName, InputStream inputStream) {
+    return uploadBlob(objectName, inputStream, null);
+  }
+
+  @Override
+  public long uploadBlob(String objectName, InputStream inputStream, String contentType) {
+    // 'null' indicates a stream will be provided later via writeInputStream.
+    BlockingInputStreamAsyncRequestBody body = AsyncRequestBody.forBlockingInputStream(
+      null
+    );
+
+    CompletableFuture<PutObjectResponse> responseFuture = s3AsyncClient.putObject(
+      r -> {
+        r.bucket(containerName).key(objectName);
+        if (contentType != null) {
+          r.contentType(contentType);
+        }
+      },
+      body
+    );
+
+    // Provide the stream of data to be uploaded and wait for the upload to complete.
+    body.writeInputStream(inputStream);
+    responseFuture.join();
+    return UNKNOWN_LATEST_VERSION;
+  }
+
+  @Override
+  public long uploadNewBlob(String objectName, InputStream inputStream) {
+    if (objectExists(containerName, objectName)) {
+      throw new BlobAlreadyExistsException(
+        "Blob '" + objectName + "' already exists in bucket '" + containerName + "'"
+      );
+    } else {
+      return uploadBlob(objectName, inputStream);
+    }
+  }
+
+  private boolean objectExists(String containerName, String objectName) {
+    return s3AsyncClient
+      .headObject(headObjectRequest ->
+        headObjectRequest.bucket(containerName).key(objectName)
+      )
+      .exceptionally(throwable -> null)
+      .thenApply(Objects::nonNull)
+      .join();
+  }
+
+  @Override
+  public void copyBlob(
+    String sourceContainerName,
+    String sourceObjectName,
+    String targetContainerName,
+    String targetObjectName
+  ) {
+    s3AsyncClient
+      .copyObject(copyObjectRequest ->
+        copyObjectRequest
+          .sourceBucket(sourceContainerName)
+          .sourceKey(sourceObjectName)
+          .destinationBucket(targetContainerName)
+          .destinationKey(targetObjectName)
+      )
+      .join();
+  }
+
+  @Override
+  public void copyVersionedBlob(
+    String sourceContainerName,
+    String sourceObjectName,
+    Long sourceVersion,
+    String targetContainerName,
+    String targetObjectName
+  ) {
+    // NOTE: S3 implementation does not support numeric versioning; the requested version is ignored
+    copyBlob(
+      sourceContainerName,
+      sourceObjectName,
+      targetContainerName,
+      targetObjectName
+    );
+  }
+
+  @Override
+  public void copyAllBlobs(
+    String sourceContainerName,
+    String prefix,
+    String targetContainerName,
+    String targetPrefix
+  ) {
+    iteratePrefix(
+      sourceContainerName,
+      prefix,
+      s3Objects -> {
+        for (S3Object s3Object : s3Objects) {
+          String targetKey = targetPrefix + trimPrefix(prefix, s3Object.key());
+          copyBlob(sourceContainerName, s3Object.key(), targetContainerName, targetKey);
+        }
+        return null;
+      }
+    );
+  }
+
+  private String trimPrefix(String prefix, String s) {
+    if (s.startsWith(prefix)) {
+      return s.substring(prefix.length());
+    }
+    return s;
+  }
+
+  @Override
+  public boolean delete(String objectName) {
+    s3AsyncClient.deleteObject(r -> r.bucket(containerName).key(objectName)).join();
+
+    return !objectExists(containerName, objectName);
+  }
+
+  @Override
+  public boolean deleteAllFilesInFolder(String folder) {
+    // Delete page by page; if any page reports errors, the overall result is a failure.
+    for (boolean pageResult : iteratePrefix(
+      containerName,
+      folder,
+      s3Objects -> {
+        List<ObjectIdentifier> objectIdentifiers = s3Objects
+          .stream()
+          .map(s3Object -> ObjectIdentifier.builder().key(s3Object.key()).build())
+          .toList();
+        return s3AsyncClient
+          .deleteObjects(deleteObjectsRequest ->
+            deleteObjectsRequest.delete(delete -> delete.objects(objectIdentifiers))
+          )
+          .join()
+          .errors()
+          .isEmpty();
+      }
+    )) {
+      if (!pageResult) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  /**
+   * List all objects under the given prefix page by page and apply the given mapping function to each
+   * page of results.
+   */
+  private <U> List<U> iteratePrefix(
+    String containerName,
+    String prefix,
+    Function<List<S3Object>, U> mapper
+  ) {
+    ListObjectsV2Publisher publisher = s3AsyncClient.listObjectsV2Paginator(req ->
+      req.bucket(containerName).prefix(prefix)
+    );
+
+    List<U> pageResults = new ArrayList<>();
+    CompletableFuture<Void> future = publisher.subscribe(res ->
+      pageResults.add(mapper.apply(res.contents()))
+    );
+    try {
+      future.get();
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+    } catch (ExecutionException ignored) {
+      // ignored on purpose, a failed listing simply produces no further page results
+    }
+
+    return pageResults;
+  }
+
+  /**
+   * {@inheritDoc}
+   *
+   * For this implementation, the container name maps to the AWS S3 bucket name as-is.
+   *
+   * @param containerName Container name to use as the AWS S3 bucket name.
+   */
+  @Override
+  public void setContainerName(String containerName) {
+    this.containerName = containerName;
+  }
+}
diff --git a/src/main/java/no/entur/uttu/export/blob/S3BlobStoreRepositoryConfig.java b/src/main/java/no/entur/uttu/export/blob/S3BlobStoreRepositoryConfig.java
new file mode 100644
index 00000000..8fe33300
--- /dev/null
+++ b/src/main/java/no/entur/uttu/export/blob/S3BlobStoreRepositoryConfig.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed under the EUPL, Version 1.2 or – as soon they will be approved by
+ * the European Commission - subsequent versions of the EUPL (the "Licence");
+ * You may not use this work except in compliance with the Licence.
+ * You may obtain a copy of the Licence at:
+ *
+ *   https://joinup.ec.europa.eu/software/page/eupl
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the Licence is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the Licence for the specific language governing permissions and
+ * limitations under the Licence.
+ *
+ */
+
+package no.entur.uttu.export.blob;
+
+import java.net.URI;
+import org.rutebanken.helper.storage.repository.BlobStoreRepository;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.Profile;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3AsyncClient;
+import software.amazon.awssdk.services.s3.S3CrtAsyncClientBuilder;
+
+@Configuration
+@Profile("s3-blobstore")
+public class S3BlobStoreRepositoryConfig {
+
+  @Value("${blobstore.s3.region}")
+  private String region;
+
+  @Value("${blobstore.s3.endpointOverride:#{null}}")
+  private String endpointOverride;
+
+  @Bean
+  BlobStoreRepository blobStoreRepository(
+    @Value("${blobstore.s3.bucket}") String containerName,
+    S3AsyncClient s3AsyncClient
+  ) {
+    S3BlobStoreRepository s3BlobStoreRepository = new S3BlobStoreRepository(
+      s3AsyncClient
+    );
+    s3BlobStoreRepository.setContainerName(containerName);
+    return s3BlobStoreRepository;
+  }
+
+  @Profile("local | test")
+  @Bean
+  public AwsCredentialsProvider localCredentials(
+    @Value("${blobstore.s3.accessKeyId}") String accessKeyId,
+    @Value("${blobstore.s3.secretKey}") String secretKey
+  ) {
+    return StaticCredentialsProvider.create(
+      AwsBasicCredentials.create(accessKeyId, secretKey)
+    );
+  }
+
+  @Profile("!local & !test")
+  @Bean
+  public AwsCredentialsProvider cloudCredentials() {
+    return DefaultCredentialsProvider.create();
+  }
+
+  @Bean
+  public S3AsyncClient s3AsyncClient(AwsCredentialsProvider credentialsProvider) {
+    S3CrtAsyncClientBuilder b = S3AsyncClient
+      .crtBuilder()
+      .credentialsProvider(credentialsProvider)
+      .region(Region.of(region));
+    if (endpointOverride != null) {
+      b = b.endpointOverride(URI.create(endpointOverride));
+    }
+    return b.build();
+  }
+}
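For the `local | test` credentials path above, the key pair is read from plain properties; a minimal sketch, assuming
the dummy credentials accepted by LocalStack as configured in `docker-compose.yml`:

```properties
blobstore.s3.accessKeyId=localstack
blobstore.s3.secretKey=localstack
```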
diff --git a/src/test/groovy/no/entur/uttu/graphql/AbstractGraphQLResourceIntegrationTest.groovy b/src/test/groovy/no/entur/uttu/graphql/AbstractGraphQLResourceIntegrationTest.groovy
index ea66aca4..a04f24c9 100644
--- a/src/test/groovy/no/entur/uttu/graphql/AbstractGraphQLResourceIntegrationTest.groovy
+++ b/src/test/groovy/no/entur/uttu/graphql/AbstractGraphQLResourceIntegrationTest.groovy
@@ -24,11 +24,13 @@ import no.entur.uttu.UttuIntegrationTest
 import no.entur.uttu.stubs.UserContextServiceStub
 import org.junit.Before
 import org.springframework.beans.factory.annotation.Autowired
+import org.springframework.test.context.ActiveProfiles
 
 import java.time.LocalDate
 
 import static io.restassured.RestAssured.given
 
+@ActiveProfiles([ "in-memory-blobstore" ])
 abstract class AbstractGraphQLResourceIntegrationTest extends UttuIntegrationTest {
 
   @Autowired
diff --git a/src/test/java/no/entur/uttu/export/blob/S3BlobStoreRepositoryTest.java b/src/test/java/no/entur/uttu/export/blob/S3BlobStoreRepositoryTest.java
new file mode 100644
index 00000000..4cb4b873
--- /dev/null
+++ b/src/test/java/no/entur/uttu/export/blob/S3BlobStoreRepositoryTest.java
@@ -0,0 +1,196 @@
+package no.entur.uttu.export.blob;
+
+import java.io.ByteArrayInputStream;
+import java.nio.charset.StandardCharsets;
+import java.util.concurrent.ExecutionException;
+import no.entur.uttu.UttuIntegrationTest;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.rutebanken.helper.storage.BlobAlreadyExistsException;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.test.context.ActiveProfiles;
+import org.springframework.test.context.DynamicPropertyRegistry;
+import org.springframework.test.context.DynamicPropertySource;
+import org.testcontainers.containers.localstack.LocalStackContainer;
+import org.testcontainers.containers.localstack.LocalStackContainer.Service;
+import org.testcontainers.junit.jupiter.Testcontainers;
+import org.testcontainers.utility.DockerImageName;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3AsyncClient;
+import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
+import software.amazon.awssdk.services.s3.model.HeadObjectResponse;
+import software.amazon.awssdk.services.s3.model.NoSuchBucketException;
+
+@Testcontainers
+@ActiveProfiles({ "s3-blobstore" })
+public class S3BlobStoreRepositoryTest extends UttuIntegrationTest {
+
+  private static final String TEST_BUCKET = "test-blobstore-exports";
+
+  private static LocalStackContainer localStack;
+
+  @Autowired
+  private S3AsyncClient s3AsyncClient;
+
+  @Autowired
+  private S3BlobStoreRepository blobStore;
+
+  @DynamicPropertySource
+  static void blobStoreProperties(DynamicPropertyRegistry registry) {
+    registry.add(
+      "blobstore.s3.endpointOverride",
+      () -> localStack.getEndpointOverride(Service.S3)
+    );
+    registry.add("blobstore.s3.region", () -> localStack.getRegion());
+    registry.add("blobstore.s3.accessKeyId", () -> localStack.getAccessKey());
+    registry.add("blobstore.s3.secretKey", () -> localStack.getSecretKey());
+    registry.add("blobstore.s3.bucket", () -> TEST_BUCKET);
+  }
+
+  private void createBucket(String bucketName)
+    throws ExecutionException, InterruptedException {
+    s3AsyncClient
+      .headBucket(request -> request.bucket(bucketName))
+      .exceptionally(throwable -> {
+        if (throwable.getCause() instanceof NoSuchBucketException) {
+          s3AsyncClient
+            .createBucket(CreateBucketRequest.builder().bucket(bucketName).build())
+            .join();
+        }
+        return null;
+      })
+      .get();
+  }
+
+  @BeforeClass
+  public static void init() {
+    localStack =
+      new LocalStackContainer(DockerImageName.parse("localstack/localstack:3.4.0"))
+        .withServices(Service.S3)
+        .withEnv("DEFAULT_REGION", Region.EU_NORTH_1.id());
+    localStack.start();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    createBucket(TEST_BUCKET);
+  }
+
+  @Test
+  public void canRoundtripAFile() throws Exception {
+    String original = "Hello, BlobStore!";
+    assertBlobExists(TEST_BUCKET, "myblob", false);
+    blobStore.uploadBlob("myblob", new ByteArrayInputStream(original.getBytes()));
+    assertBlobExists(TEST_BUCKET, "myblob", true);
+    Assert.assertEquals(original, new String(blobStore.getBlob("myblob").readAllBytes()));
+    Assert.assertTrue(blobStore.delete("myblob"));
+  }
+
+  @Test(expected = BlobAlreadyExistsException.class)
+  public void cannotOverWriteExistingObject() throws Exception {
+    String original = "another bytes the dust";
+    assertBlobExists(TEST_BUCKET, "anotherblob", false);
+    blobStore.uploadNewBlob("anotherblob", new ByteArrayInputStream(original.getBytes()));
+    assertBlobExists(TEST_BUCKET, "anotherblob", true);
+    blobStore.uploadNewBlob(
+      "anotherblob",
+      new ByteArrayInputStream("something silly".getBytes())
+    );
+  }
+
+  /**
+   * Implementation note: Content type can be set for S3, but it does not mean much when downloading. The
+   * header does persist in object metadata, though.
+   */
+  @Test
+  public void canSetContentTypeForUpload() throws Exception {
+    String contentType = "application/json";
+    blobStore.uploadBlob(
+      "json",
+      new ByteArrayInputStream("{\"key\":false}".getBytes()),
+      contentType
+    );
+    assertBlobExists(TEST_BUCKET, "json", true);
+    HeadObjectResponse response = s3AsyncClient
+      .headObject(request -> request.bucket(TEST_BUCKET).key("json"))
+      .join();
+    Assert.assertEquals(contentType, response.contentType());
+  }
+
+  @Test
+  public void canCopyContentBetweenBuckets() throws Exception {
+    String targetBucket = "another-bucket";
+    String content = "1";
+    createBucket(targetBucket);
+    blobStore.uploadBlob("smallfile", asStream(content));
+    blobStore.copyBlob(TEST_BUCKET, "smallfile", targetBucket, "tinyfile");
+    blobStore.setContainerName(targetBucket);
+    Assert.assertEquals(
+      content,
+      new String(blobStore.getBlob("tinyfile").readAllBytes())
+    );
+    blobStore.setContainerName(TEST_BUCKET);
+  }
+
+  /**
+   * Implementation note: Versioning is a no-op with S3, as the current interface models versioning on
+   * GCP's blob storage semantics. This is effectively the same as copying the blob normally.
+   * @throws Exception
+   */
+  @Test
+  public void canCopyVersionedContentBetweenBuckets() throws Exception {
+    String targetBucket = "yet-another-bucket";
+    String content = "a";
+    createBucket(targetBucket);
+    blobStore.uploadBlob("minusculefile", asStream(content));
+    blobStore.copyVersionedBlob(
+      TEST_BUCKET,
+      "minusculefile",
+      -1_000_000L,
+      targetBucket,
+      "barelyworthmentioningfile"
+    );
+    blobStore.setContainerName(targetBucket);
+    Assert.assertEquals(
+      content,
+      new String(blobStore.getBlob("barelyworthmentioningfile").readAllBytes())
+    );
+    blobStore.setContainerName(TEST_BUCKET);
+  }
+
+  @Test
+  public void canCopyAllBlobsWithSharedPrefix() throws Exception {
+    String targetBucket = "one-more-bucket";
+    createBucket(targetBucket);
+    blobStore.uploadBlob("things/a", asStream("a"));
+    blobStore.uploadBlob("things/b", asStream("b"));
+    blobStore.uploadBlob("things/c", asStream("c"));
+    blobStore.uploadBlob("stuff/d", asStream("d"));
+    blobStore.copyAllBlobs(TEST_BUCKET, "things", targetBucket, "bits");
+    assertBlobExists(targetBucket, "bits/a", true);
+    assertBlobExists(targetBucket, "bits/b", true);
+    assertBlobExists(targetBucket, "bits/c", true);
+    assertBlobExists(targetBucket, "stuff/d", false);
+  }
+
+  private static @NotNull ByteArrayInputStream asStream(String source) {
+    return new ByteArrayInputStream(source.getBytes(StandardCharsets.UTF_8));
+  }
+
+  private void assertBlobExists(String bucket, String key, boolean exists)
+    throws ExecutionException, InterruptedException {
+    HeadObjectResponse response = s3AsyncClient
+      .headObject(request -> request.bucket(bucket).key(key))
+      .exceptionally(throwable -> null)
+      .get();
+    if (!exists && response != null) {
+      Assert.fail(bucket + " / " + key + " exists");
+    }
+    if (exists && response == null) {
+      Assert.fail(bucket + " / " + key + " does not exist");
+    }
+  }
+}
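For reference, a minimal sketch of how the new repository is wired up outside of Spring. The endpoint, region,
credentials, and bucket name below are illustrative, mirroring the LocalStack defaults from `docker-compose.yml`; in
uttu itself this wiring is done by `S3BlobStoreRepositoryConfig`:

```java
import java.io.ByteArrayInputStream;
import java.net.URI;
import no.entur.uttu.export.blob.S3BlobStoreRepository;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3AsyncClient;

public class S3BlobStoreExample {

  public static void main(String[] args) throws Exception {
    // Illustrative LocalStack wiring; uttu builds this client in S3BlobStoreRepositoryConfig.
    S3AsyncClient client = S3AsyncClient
      .crtBuilder()
      .credentialsProvider(
        StaticCredentialsProvider.create(
          AwsBasicCredentials.create("localstack", "localstack")
        )
      )
      .region(Region.EU_NORTH_1)
      .endpointOverride(URI.create("http://localhost:4566"))
      .build();

    S3BlobStoreRepository repository = new S3BlobStoreRepository(client);
    repository.setContainerName("local-blobstore-exports");

    // Upload a blob and read it back.
    repository.uploadBlob(
      "exports/demo.txt",
      new ByteArrayInputStream("hello".getBytes())
    );
    try (var in = repository.getBlob("exports/demo.txt")) {
      System.out.println(new String(in.readAllBytes()));
    }
  }
}
```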