diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java index 67df37e5ebfcd..b65a38184d869 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.s3a.api.S3ObjectStorageClassFilter; import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory; import java.time.Duration; @@ -1519,6 +1520,18 @@ private Constants() { */ public static final int DEFAULT_PREFETCH_MAX_BLOCKS_COUNT = 4; + /** + * Read Restored Glacier objects config. + * Value = {@value} + */ + public static final String READ_RESTORED_GLACIER_OBJECTS = "fs.s3a.glacier.read.restored.objects"; + + /** + * Default value of Read Restored Glacier objects config. + */ + public static final String DEFAULT_READ_RESTORED_GLACIER_OBJECTS = + S3ObjectStorageClassFilter.READ_ALL.toString(); + /** * The bucket region header. */ diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java index e0868a2e13087..002c1707955aa 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.s3a.api.S3ObjectStorageClassFilter; import org.apache.hadoop.fs.s3a.impl.AbstractStoreOperation; import org.apache.hadoop.fs.s3a.impl.ListingOperationCallbacks; import org.apache.hadoop.fs.s3a.impl.StoreContext; @@ -76,6 +77,7 @@ public class Listing extends AbstractStoreOperation { private static final Logger LOG = S3AFileSystem.LOG; private final boolean isCSEEnabled; + private final S3ObjectStorageClassFilter s3ObjectStorageClassFilter; static final FileStatusAcceptor ACCEPT_ALL_BUT_S3N = new AcceptAllButS3nDirs(); @@ -87,6 +89,7 @@ public Listing(ListingOperationCallbacks listingOperationCallbacks, super(storeContext); this.listingOperationCallbacks = listingOperationCallbacks; this.isCSEEnabled = storeContext.isCSEEnabled(); + this.s3ObjectStorageClassFilter = storeContext.getS3ObjectsStorageClassFilter(); } /** @@ -462,7 +465,10 @@ private boolean buildNextStatusBatch(S3ListResult objects) { LOG.debug("{}: {}", keyPath, stringify(s3Object)); } // Skip over keys that are ourselves and old S3N _$folder$ files - if (acceptor.accept(keyPath, s3Object) && filter.accept(keyPath)) { + // Handle Glacier Storage Class based on the config fs.s3a.glacier.read.restored.objects + if (s3ObjectStorageClassFilter.getFilter().apply(s3Object) && + acceptor.accept(keyPath, s3Object) && + filter.accept(keyPath)) { S3AFileStatus status = createFileStatus(keyPath, s3Object, listingOperationCallbacks.getDefaultBlockSize(keyPath), getStoreContext().getUsername(), diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java index 0e2ae0f74dd0a..b71a0a8e154f2 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java +++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java @@ -113,6 +113,7 @@ import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.impl.OpenFileParameters; import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.s3a.api.S3ObjectStorageClassFilter; import org.apache.hadoop.fs.s3a.audit.AuditSpanS3A; import org.apache.hadoop.fs.s3a.auth.SignerManager; import org.apache.hadoop.fs.s3a.auth.delegation.DelegationOperations; @@ -444,6 +445,12 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities, */ private boolean isCSEEnabled; + /** + * {@link S3ObjectStorageClassFilter} filters the S3 objects returned in listings, based on the + * {@code fs.s3a.glacier.read.restored.objects} configuration. + */ + private S3ObjectStorageClassFilter s3ObjectStorageClassFilter; + /** * Bucket AccessPoint. */ @@ -585,6 +592,18 @@ public void initialize(URI name, Configuration originalConf) s3aInternals = createS3AInternals(); + try { + s3ObjectStorageClassFilter = Optional.of(conf.getTrimmed(READ_RESTORED_GLACIER_OBJECTS, + DEFAULT_READ_RESTORED_GLACIER_OBJECTS)) + .map(String::toUpperCase) + .map(S3ObjectStorageClassFilter::valueOf).get(); + } catch (IllegalArgumentException e) { + LOG.warn("Invalid value set for the config {}. Valid values are: " + + "READ_ALL, SKIP_ALL_GLACIER, READ_RESTORED_GLACIER_OBJECTS; defaulting to READ_ALL", + READ_RESTORED_GLACIER_OBJECTS); + s3ObjectStorageClassFilter = S3ObjectStorageClassFilter.READ_ALL; + } + // look for encryption data // DT Bindings may override this setEncryptionSecrets( @@ -5686,6 +5705,7 @@ public StoreContext createStoreContext() { .setContextAccessors(new ContextAccessorsImpl()) .setAuditor(getAuditor()) .setEnableCSE(isCSEEnabled) + .setS3ObjectStorageClassFilter(s3ObjectStorageClassFilter) .build(); } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/RequestFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/RequestFactory.java index 73ad137a86d3c..d293d880b7d79 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/RequestFactory.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/RequestFactory.java @@ -37,7 +37,9 @@ import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; import software.amazon.awssdk.services.s3.model.ObjectIdentifier; import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.RestoreObjectRequest; import software.amazon.awssdk.services.s3.model.StorageClass; +import software.amazon.awssdk.services.s3.model.Tier; import software.amazon.awssdk.services.s3.model.UploadPartRequest; import org.apache.hadoop.classification.InterfaceAudience; @@ -251,4 +253,15 @@ ListObjectsV2Request.Builder newListObjectsV2RequestBuilder(String key, DeleteObjectsRequest.Builder newBulkDeleteRequestBuilder( List<ObjectIdentifier> keysToDelete); + /** + * Create a request builder to initiate a restore of a Glacier object. + * @param key object to restore + * @param tier Glacier retrieval tier at which the restore will be processed. + * @param expirationDays lifetime of the active restored copy in days.
+ * @return the request builder + */ + RestoreObjectRequest.Builder newRestoreObjectRequestBuilder(String key, + Tier tier, + int expirationDays); + } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/S3ObjectStorageClassFilter.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/S3ObjectStorageClassFilter.java new file mode 100644 index 0000000000000..e135d8a6d1f9a --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/api/S3ObjectStorageClassFilter.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.api; + +import java.util.Set; +import java.util.function.Function; + +import software.amazon.awssdk.services.s3.model.ObjectStorageClass; +import software.amazon.awssdk.services.s3.model.S3Object; + +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.util.Sets; + + +/** + *
+ * {@link S3ObjectStorageClassFilter} filters the S3 objects returned in listings, based on the
+ * {@code fs.s3a.glacier.read.restored.objects} configuration set in {@link S3AFileSystem}.
+ * The config can have three values:
+ * {@code READ_ALL}: Lists all objects irrespective of storage class; a subsequent read of an
+ * unrestored Glacier object will fail with InvalidObjectStateException:
+ * The operation is not valid for the object's storage class.
+ * {@code SKIP_ALL_GLACIER}: Ignores any S3 objects tagged with a Glacier storage class
+ * and lists only the others.
+ * {@code READ_RESTORED_GLACIER_OBJECTS}: Checks the restore status of each Glacier object;
+ * restored objects are listed and read like normal S3 objects, while unrestored objects are
+ * ignored, as their data cannot yet be retrieved from S3 Glacier.
+ * 
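+ * A sketch of how the filter is applied during listing (illustrative only;
+ * {@code summary} stands for an {@link S3Object} returned in a list response):
+ * <pre>
+ *   S3ObjectStorageClassFilter filter = S3ObjectStorageClassFilter.SKIP_ALL_GLACIER;
+ *   if (filter.getFilter().apply(summary)) {
+ *     // storage class is not GLACIER/DEEP_ARCHIVE; include it in the listing
+ *   }
+ * </pre>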
+ */ +public enum S3ObjectStorageClassFilter { + READ_ALL(o -> true), + SKIP_ALL_GLACIER(S3ObjectStorageClassFilter::isNotGlacierObject), + READ_RESTORED_GLACIER_OBJECTS(S3ObjectStorageClassFilter::isCompletedRestoredObject); + + private static final Set<ObjectStorageClass> GLACIER_STORAGE_CLASSES = Sets.newHashSet( + ObjectStorageClass.GLACIER, ObjectStorageClass.DEEP_ARCHIVE); + + private final Function<S3Object, Boolean> filter; + + S3ObjectStorageClassFilter(Function<S3Object, Boolean> filter) { + this.filter = filter; + } + + /** + * Checks if the S3 object does not have a Glacier or Deep Archive storage class. + * @param object s3 object + * @return true if the storage class of the object is not Glacier/Deep Archive + */ + private static boolean isNotGlacierObject(S3Object object) { + return !GLACIER_STORAGE_CLASSES.contains(object.storageClass()); + } + + /** + * Checks if the S3 object has a Glacier or Deep Archive storage class. + * @param object s3 object + * @return true if the storage class of the object is Glacier/Deep Archive + */ + private static boolean isGlacierObject(S3Object object) { + return GLACIER_STORAGE_CLASSES.contains(object.storageClass()); + } + + /** + * Checks if the S3 object is fully restored; non-Glacier objects always pass. + * @param object s3 object + * @return true if the object is not a Glacier object, or its restore has completed + */ + private static boolean isCompletedRestoredObject(S3Object object) { + if (isGlacierObject(object)) { + return object.restoreStatus() != null && !object.restoreStatus().isRestoreInProgress(); + } + return true; + } + + /** + * Returns the filter function set as part of the enum definition. + * @return the filter function set as part of the enum definition + */ + public Function<S3Object, Boolean> getFilter() { + return filter; + } + +} diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RequestFactoryImpl.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RequestFactoryImpl.java index c91324da7cb15..c6df6965c6ea9 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RequestFactoryImpl.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/RequestFactoryImpl.java @@ -34,6 +34,7 @@ import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.GlacierJobParameters; import software.amazon.awssdk.services.s3.model.HeadBucketRequest; import software.amazon.awssdk.services.s3.model.HeadObjectRequest; import software.amazon.awssdk.services.s3.model.HeadObjectResponse; @@ -42,9 +43,13 @@ import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; import software.amazon.awssdk.services.s3.model.MetadataDirective; import software.amazon.awssdk.services.s3.model.ObjectIdentifier; +import software.amazon.awssdk.services.s3.model.OptionalObjectAttributes; import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.RestoreObjectRequest; +import software.amazon.awssdk.services.s3.model.RestoreRequest; import software.amazon.awssdk.services.s3.model.ServerSideEncryption; import software.amazon.awssdk.services.s3.model.StorageClass; +import software.amazon.awssdk.services.s3.model.Tier; import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.utils.Md5Utils; import org.apache.hadoop.util.Preconditions; @@ -609,6 +614,8 @@ public
ListObjectsV2Request.Builder newListObjectsV2RequestBuilder( final ListObjectsV2Request.Builder requestBuilder = ListObjectsV2Request.builder() .bucket(bucket) .maxKeys(maxKeys) + // Optional attribute to request the restore status of Glacier objects in the listing + .optionalObjectAttributes(OptionalObjectAttributes.RESTORE_STATUS) .prefix(key); if (delimiter != null) { @@ -632,6 +639,21 @@ public DeleteObjectsRequest.Builder newBulkDeleteRequestBuilder( .delete(d -> d.objects(keysToDelete).quiet(!LOG.isTraceEnabled()))); } + @Override + public RestoreObjectRequest.Builder newRestoreObjectRequestBuilder(String key, + Tier tier, + int expirationDays) { + return prepareRequest(RestoreObjectRequest + .builder() + .bucket(bucket) + .key(key) + .restoreRequest(RestoreRequest + .builder() + .days(expirationDays) + .glacierJobParameters(GlacierJobParameters.builder().tier(tier).build()) + .build())); + } + @Override public void setEncryptionSecrets(final EncryptionSecrets secrets) { encryptionSecrets = secrets; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java index 4b8a28f3e7bb0..88229eaf06f12 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContext.java @@ -26,13 +26,13 @@ import java.util.concurrent.ExecutorService; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; - import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.api.RequestFactory; +import org.apache.hadoop.fs.s3a.api.S3ObjectStorageClassFilter; import org.apache.hadoop.fs.s3a.audit.AuditSpanS3A; import org.apache.hadoop.fs.s3a.Invoker; import org.apache.hadoop.fs.s3a.S3AFileStatus; @@ -117,6 +117,8 @@ public class StoreContext implements ActiveThreadSpanSource<AuditSpanS3A> { /** Is client side encryption enabled? */ private final boolean isCSEEnabled; + /** Filter for objects in listings, from {@code fs.s3a.glacier.read.restored.objects}. */ + private final S3ObjectStorageClassFilter s3ObjectStorageClassFilter; + /** * Instantiate. */ @@ -137,7 +139,8 @@ public class StoreContext implements ActiveThreadSpanSource<AuditSpanS3A> { final boolean useListV1, final ContextAccessors contextAccessors, final AuditSpanSource<AuditSpanS3A> auditor, - final boolean isCSEEnabled) { + final boolean isCSEEnabled, + final S3ObjectStorageClassFilter s3ObjectStorageClassFilter) { this.fsURI = fsURI; this.bucket = bucket; this.configuration = configuration; @@ -158,6 +161,7 @@ public class StoreContext implements ActiveThreadSpanSource<AuditSpanS3A> { this.contextAccessors = contextAccessors; this.auditor = auditor; this.isCSEEnabled = isCSEEnabled; + this.s3ObjectStorageClassFilter = s3ObjectStorageClassFilter; } public URI getFsURI() { @@ -411,4 +415,13 @@ public RequestFactory getRequestFactory() { public boolean isCSEEnabled() { return isCSEEnabled; } + + /** + * Return the S3ObjectStorageClassFilter object for S3A, + * whose value is set according to the config {@code fs.s3a.glacier.read.restored.objects}.
+ * @return the {@link S3ObjectStorageClassFilter} object + */ + public S3ObjectStorageClassFilter getS3ObjectsStorageClassFilter() { + return s3ObjectStorageClassFilter; + } } diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java index cff38b9fc4b7d..b245d582851db 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/StoreContextBuilder.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.s3a.Invoker; import org.apache.hadoop.fs.s3a.S3AInputPolicy; import org.apache.hadoop.fs.s3a.S3AStorageStatistics; +import org.apache.hadoop.fs.s3a.api.S3ObjectStorageClassFilter; import org.apache.hadoop.fs.s3a.audit.AuditSpanS3A; import org.apache.hadoop.fs.s3a.statistics.S3AStatisticsContext; import org.apache.hadoop.fs.store.audit.AuditSpanSource; @@ -69,6 +70,8 @@ public class StoreContextBuilder { private boolean isCSEEnabled; + private S3ObjectStorageClassFilter s3ObjectStorageClassFilter; + public StoreContextBuilder setFsURI(final URI fsURI) { this.fsURI = fsURI; return this; @@ -175,6 +178,12 @@ public StoreContextBuilder setEnableCSE( return this; } + public StoreContextBuilder setS3ObjectStorageClassFilter( + S3ObjectStorageClassFilter value) { + s3ObjectStorageClassFilter = value; + return this; + } + public StoreContext build() { return new StoreContext(fsURI, bucket, @@ -192,6 +201,7 @@ public StoreContext build() { useListV1, contextAccessors, auditor, - isCSEEnabled); + isCSEEnabled, + s3ObjectStorageClassFilter); } } diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md index 7412a4cebcc4f..4fcb3c1236b24 100644 --- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md +++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md @@ -927,8 +927,34 @@ The switch to turn S3A auditing on or off. Should auditing of S3A requests be enabled? </description> </property> +``` +## Glacier Object Support + +[Amazon S3 Glacier (S3 Glacier)](https://docs.aws.amazon.com/amazonglacier/latest/dev/introduction.html) is a secure and durable service for low-cost data archiving and +long-term backup. +With S3 Glacier, data can be stored cost-effectively for months, years, or even decades. +This feature introduces a new config which determines which objects are returned by listStatus. +Note: this is not available on all AWS S3 store types, or on third-party stores. + +The config is as follows: +```xml + +<property> + <name>fs.s3a.glacier.read.restored.objects</name> + <value>READ_ALL</value> + <description> + The config can have three values: + * READ_ALL: Lists all objects irrespective of storage class; a subsequent read of an unrestored Glacier object will fail with InvalidObjectStateException: The operation is not valid for the object's storage class. + * SKIP_ALL_GLACIER: Ignores any S3 objects tagged with a Glacier storage class and lists only the others. + * READ_RESTORED_GLACIER_OBJECTS: Checks the restore status of each Glacier object; restored objects are listed and read like normal S3 objects, while unrestored objects are ignored, as their data cannot yet be retrieved from S3 Glacier. + </description> +</property> +```
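+
+For example, to have listings skip Glacier/Deep Archive objects which have not yet been
+restored, set the option to SKIP_ALL_GLACIER (a sketch; the bucket and path are placeholders):
+
+```java
+Configuration conf = new Configuration();
+conf.set("fs.s3a.glacier.read.restored.objects", "SKIP_ALL_GLACIER");
+FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
+// unrestored GLACIER/DEEP_ARCHIVE objects are now omitted from the result
+FileStatus[] statuses = fs.listStatus(new Path("s3a://example-bucket/data/"));
+```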
+ ## Retry and Recovery The S3A client makes a best-effort attempt at recovering from network failures; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/list/ITestS3AReadRestoredGlacierObjects.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/list/ITestS3AReadRestoredGlacierObjects.java new file mode 100644 index 0000000000000..8b3648da5d36a --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/list/ITestS3AReadRestoredGlacierObjects.java @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.list; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import org.assertj.core.api.Assertions; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.RestoreObjectRequest; +import software.amazon.awssdk.services.s3.model.S3Object; +import software.amazon.awssdk.services.s3.model.Tier; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.fs.s3a.S3ListRequest; +import org.apache.hadoop.fs.s3a.api.S3ObjectStorageClassFilter; +import org.apache.hadoop.fs.store.audit.AuditSpan; +import org.apache.hadoop.test.LambdaTestUtils; + +import static org.apache.hadoop.fs.s3a.Constants.READ_RESTORED_GLACIER_OBJECTS; +import static org.apache.hadoop.fs.s3a.Constants.STORAGE_CLASS; +import static org.apache.hadoop.fs.s3a.Constants.STORAGE_CLASS_DEEP_ARCHIVE; +import static org.apache.hadoop.fs.s3a.Constants.STORAGE_CLASS_GLACIER; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableFilesystemCaching; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.skipIfStorageClassTestsDisabled; +import static org.apache.hadoop.fs.statistics.StoreStatisticNames.OBJECT_LIST_REQUEST; + + +/** + * Tests of various cases related to the Glacier/Deep Archive storage classes.
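+ *
+ * The tests share one pattern (a sketch; {@code fs} is a filesystem created with the
+ * filter value under test, over a path containing a single archived object):
+ * <pre>
+ *   conf.set("fs.s3a.glacier.read.restored.objects", "SKIP_ALL_GLACIER");
+ *   // the unrestored object must be omitted from the listing
+ *   Assertions.assertThat(fs.listStatus(path)).isEmpty();
+ * </pre>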
+ */ +@RunWith(Parameterized.class) +public class ITestS3AReadRestoredGlacierObjects extends AbstractS3ATestBase { + + enum Type { GLACIER_AND_DEEP_ARCHIVE, GLACIER } + + @Parameterized.Parameters(name = "storage-class-{0}") + public static Collection<Object[]> data() { + return Arrays.asList(new Object[][] { + {STORAGE_CLASS_GLACIER}, {STORAGE_CLASS_DEEP_ARCHIVE}, + }); + } + + private static final int MAX_RETRIES = 100; + private static final int RETRY_DELAY_MS = 5000; + private final String glacierClass; + + public ITestS3AReadRestoredGlacierObjects(String glacierClass) { + this.glacierClass = glacierClass; + } + + private FileSystem createFiles(String s3ObjectStorageClassFilter) throws Throwable { + FileSystem fs = createFileSystem(s3ObjectStorageClassFilter); + Path path = new Path(methodPath(), "glaciated"); + ContractTestUtils.touch(fs, path); + return fs; + } + + private FileSystem createFileSystem(String s3ObjectStorageClassFilter) throws Throwable { + Configuration conf = createConfiguration(); + conf.set(READ_RESTORED_GLACIER_OBJECTS, s3ObjectStorageClassFilter); + // Create objects with a Glacier storage class: DEEP_ARCHIVE or GLACIER + conf.set(STORAGE_CLASS, glacierClass); + FileSystem fs = new S3AFileSystem(); + fs.initialize(getFileSystem().getUri(), conf); + return fs; + } + + @Override + protected Configuration createConfiguration() { + Configuration newConf = super.createConfiguration(); + skipIfStorageClassTestsDisabled(newConf); + disableFilesystemCaching(newConf); + removeBaseAndBucketOverrides(newConf, STORAGE_CLASS, READ_RESTORED_GLACIER_OBJECTS); + return newConf; + } + + @Test + public void testAllGlacierAndDeepArchiveCases() throws Throwable { + + try (FileSystem fs = createFiles(S3ObjectStorageClassFilter.SKIP_ALL_GLACIER.name())) { + + // testIgnoreGlacierObject + describe("Running testIgnoreGlacierObject"); + Assertions.assertThat( + fs.listStatus(methodPath())) + .describedAs("FileStatus List of %s", methodPath()) + .isEmpty(); + + // testReadAllObjects + describe("Running testReadAllObjects"); + try (FileSystem fsTest = createFileSystem(S3ObjectStorageClassFilter.READ_ALL.name())) { + Assertions.assertThat( + fsTest.listStatus(methodPath())) + .describedAs("FileStatus List of %s", methodPath()) + .isNotEmpty(); + } + + // testIgnoreRestoringGlacierObject + describe("Running testIgnoreRestoringGlacierObject"); + try (FileSystem fsTest = createFileSystem(S3ObjectStorageClassFilter.READ_RESTORED_GLACIER_OBJECTS.name())) { + Assertions.assertThat( + fsTest.listStatus(methodPath())) + .describedAs("FileStatus List of %s", methodPath()) + .isEmpty(); + } + + // testConfigWithInvalidValue + describe("Running testConfigWithInvalidValue"); + String invalidValue = "ABCDE"; + try (FileSystem fsTest = createFiles(invalidValue)) { + Assertions.assertThat( + fsTest.listStatus(methodPath())) + .describedAs("FileStatus List of %s", methodPath()) + .isNotEmpty(); + } + + + // testRestoredGlacierObject - run only for the Glacier storage class + if (STORAGE_CLASS_GLACIER.equals(glacierClass)) { + describe("Running testRestoredGlacierObject"); + try (FileSystem fsTest = createFileSystem(S3ObjectStorageClassFilter.READ_RESTORED_GLACIER_OBJECTS.name())) { + restoreGlacierObject(getFilePrefixForListObjects(), 2); + Assertions.assertThat( + fsTest.listStatus(methodPath())) + .describedAs("FileStatus List of %s", methodPath()) + .isNotEmpty(); + } + + } + + } + } + + + private void restoreGlacierObject(String glacierObjectKey, int expirationDays) throws Exception { + describe("Initiate restore of the Glacier 
object"); + try (AuditSpan auditSpan = getSpanSource().createSpan(OBJECT_LIST_REQUEST, "", "").activate()) { + + S3Client s3Client = getFileSystem().getS3AInternals().getAmazonS3Client("test"); + + // Create a restore object request + RestoreObjectRequest requestRestore = getFileSystem().getRequestFactory() + .newRestoreObjectRequestBuilder(glacierObjectKey, Tier.EXPEDITED, expirationDays).build(); + + s3Client.restoreObject(requestRestore); + + // fetch the glacier object + S3ListRequest s3ListRequest = getFileSystem().createListObjectsRequest( + getFilePrefixForListObjects(), "/"); + + describe("Wait till restore of the object is complete"); + LambdaTestUtils.await(MAX_RETRIES * RETRY_DELAY_MS, RETRY_DELAY_MS, + () -> !getS3GlacierObject(s3Client, s3ListRequest).restoreStatus().isRestoreInProgress()); + } + } + + + private String getFilePrefixForListObjects() throws IOException { + return getFileSystem().pathToKey(new Path(methodPath(), "glaciated")); + } + + private S3Object getS3GlacierObject(S3Client s3Client, S3ListRequest s3ListRequest) { + return s3Client.listObjectsV2(s3ListRequest.getV2()).contents() + .stream() + .findFirst().orElse(null); + } +}