@@ -213,7 +213,7 @@
import static org.apache.hadoop.fs.s3a.S3AUtils.*;
import static org.apache.hadoop.fs.s3a.Statistic.*;
import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.INITIALIZE_SPAN;
-import static org.apache.hadoop.fs.s3a.auth.RolePolicies.STATEMENT_ALLOW_SSE_KMS_RW;
+import static org.apache.hadoop.fs.s3a.auth.RolePolicies.STATEMENT_ALLOW_KMS_RW;
import static org.apache.hadoop.fs.s3a.auth.RolePolicies.allowS3Operations;
import static org.apache.hadoop.fs.s3a.auth.delegation.S3ADelegationTokens.TokenIssuingPolicy.NoTokensAvailable;
import static org.apache.hadoop.fs.s3a.auth.delegation.S3ADelegationTokens.hasDelegationTokenBinding;
@@ -4222,7 +4222,7 @@ public List<RoleModel.Statement> listAWSPolicyRules(
// no attempt is made to qualify KMS access; there's no
// way to predict read keys, and not worried about granting
// too much encryption access.
-statements.add(STATEMENT_ALLOW_SSE_KMS_RW);
+statements.add(STATEMENT_ALLOW_KMS_RW);

return statements;
}
@@ -80,7 +80,7 @@ private RolePolicies() {
* Statement to allow KMS R/W access, so full use of
* SSE-KMS.
*/
-public static final Statement STATEMENT_ALLOW_SSE_KMS_RW =
+public static final Statement STATEMENT_ALLOW_KMS_RW =
statement(true, KMS_ALL_KEYS, KMS_ALL_OPERATIONS);
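
Read roughly as: a single Allow statement over every KMS key and every KMS operation. For illustration, a hedged usage sketch built only from helpers that appear elsewhere in this PR (the particular combination shown is an assumption, not part of this change):

// Sketch: bind KMS R/W alongside broad S3 access so that both SSE-KMS
// and CSE-KMS encrypted buckets work under a restricted assumed role.
Configuration conf = createAssumedRoleConfig();
bindRolePolicyStatements(conf,
    STATEMENT_ALLOW_KMS_RW,
    statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS));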

/**
@@ -105,6 +105,7 @@ public synchronized void teardown() throws Exception {
@Test
public void testCacheFileExistence() throws Throwable {
describe("Verify that FS cache files exist on local FS");
+skipIfClientSideEncryption();

try (FSDataInputStream in = fs.open(testFile)) {
byte[] buffer = new byte[prefetchBlockSize];
@@ -118,6 +118,7 @@ private static int calculateNumBlocks(long largeFileSize, int blockSize) {
@Test
public void testReadLargeFileFully() throws Throwable {
describe("read a large file fully, uses S3ACachingInputStream");
+skipIfClientSideEncryption();
Review comment (Contributor): Shall we just move these into openFS() since we're assuming the FS it provides is not compatible with CSE for now?
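
A minimal sketch of that suggestion (assumptions: openFS() is this test class's helper that opens the filesystem under test, and the field names below are hypothetical):

// Hypothetical refactoring: do the CSE skip once where the test FS is
// opened, so each test case can drop its own guard.
private void openFS() throws IOException {
  // prefetching input streams are not CSE-compatible yet
  skipIfClientSideEncryption();
  Configuration conf = getConfiguration();
  largeFile = new Path(largeFileUri);                      // hypothetical fields
  largeFileFS = (S3AFileSystem) largeFile.getFileSystem(conf);
}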

IOStatistics ioStats;
openFS();

@@ -151,6 +152,7 @@ public void testReadLargeFileFully() throws Throwable {
public void testReadLargeFileFullyLazySeek() throws Throwable {
describe("read a large file using readFully(position,buffer,offset,length),"
+ " uses S3ACachingInputStream");
+skipIfClientSideEncryption();
IOStatistics ioStats;
openFS();

@@ -182,6 +184,7 @@ public void testReadLargeFileFullyLazySeek() throws Throwable {
@Test
public void testRandomReadLargeFile() throws Throwable {
describe("random read on a large file, uses S3ACachingInputStream");
+skipIfClientSideEncryption();
IOStatistics ioStats;
openFS();

@@ -59,7 +59,7 @@ protected Configuration createConfiguration() {
@Test
public void testRequesterPaysOptionSuccess() throws Throwable {
describe("Test requester pays enabled case by reading last then first byte");

skipIfClientSideEncryption();
Configuration conf = this.createConfiguration();
conf.setBoolean(ALLOW_REQUESTER_PAYS, true);
// Enable bucket exists check, the first failure point people may encounter
@@ -426,8 +426,7 @@ public void testAssumeRolePoliciesOverrideRolePerms() throws Throwable {
bindRolePolicy(conf,
policy(
statement(false, S3_ALL_BUCKETS, S3_GET_OBJECT_TORRENT),
-ALLOW_S3_GET_BUCKET_LOCATION,
-STATEMENT_ALLOW_SSE_KMS_RW));
+ALLOW_S3_GET_BUCKET_LOCATION, STATEMENT_ALLOW_KMS_RW));
Path path = path("testAssumeRoleStillIncludesRolePerms");
roleFS = (S3AFileSystem) path.getFileSystem(conf);
assertTouchForbidden(roleFS, path);
@@ -447,8 +446,7 @@ public void testReadOnlyOperations() throws Throwable {
bindRolePolicy(conf,
policy(
statement(false, S3_ALL_BUCKETS, S3_PATH_WRITE_OPERATIONS),
-STATEMENT_ALL_S3,
-STATEMENT_ALLOW_SSE_KMS_READ));
+STATEMENT_ALL_S3, STATEMENT_ALLOW_KMS_RW));
Path path = methodPath();
roleFS = (S3AFileSystem) path.getFileSystem(conf);
// list the root path, expect happy
@@ -495,8 +493,7 @@ public void testRestrictedWriteSubdir() throws Throwable {
Configuration conf = createAssumedRoleConfig();

bindRolePolicyStatements(conf,
-STATEMENT_ALL_BUCKET_READ_ACCESS,
-STATEMENT_ALLOW_SSE_KMS_RW,
+STATEMENT_ALL_BUCKET_READ_ACCESS, STATEMENT_ALLOW_KMS_RW,
new Statement(Effects.Allow)
.addActions(S3_ALL_OPERATIONS)
.addResources(directory(restrictedDir)));
@@ -563,8 +560,7 @@ public void testRestrictedCommitActions() throws Throwable {
fs.delete(basePath, true);
fs.mkdirs(readOnlyDir);

-bindRolePolicyStatements(conf,
-STATEMENT_ALLOW_SSE_KMS_RW,
+bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
STATEMENT_ALL_BUCKET_READ_ACCESS,
new Statement(Effects.Allow)
.addActions(S3_PATH_RW_OPERATIONS)
@@ -714,8 +710,7 @@ public void executePartialDelete(final Configuration conf,
S3AFileSystem fs = getFileSystem();
fs.delete(destDir, true);

-bindRolePolicyStatements(conf,
-STATEMENT_ALLOW_SSE_KMS_RW,
+bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS),
new Statement(Effects.Deny)
.addActions(S3_PATH_WRITE_OPERATIONS)
@@ -746,8 +741,7 @@ public void testBucketLocationForbidden() throws Throwable {
describe("Restrict role to read only");
Configuration conf = createAssumedRoleConfig();

-bindRolePolicyStatements(conf,
-STATEMENT_ALLOW_SSE_KMS_RW,
+bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS),
statement(false, S3_ALL_BUCKETS, S3_GET_BUCKET_LOCATION));
Path path = methodPath();
@@ -61,8 +61,7 @@ public void setup() throws Exception {
restrictedDir = super.path("restricted");
Configuration conf = newAssumedRoleConfig(getConfiguration(),
getAssumedRoleARN());
-bindRolePolicyStatements(conf,
-STATEMENT_ALLOW_SSE_KMS_RW,
+bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
statement(true, S3_ALL_BUCKETS, S3_BUCKET_READ_OPERATIONS),
new RoleModel.Statement(RoleModel.Effects.Allow)
.addActions(S3_PATH_RW_OPERATIONS)
@@ -260,8 +260,7 @@ public void initNoReadAccess() throws Throwable {
// it still has write access, which can be explored in the final
// step to delete files and directories.
roleConfig = createAssumedRoleConfig();
-bindRolePolicyStatements(roleConfig,
-STATEMENT_ALLOW_SSE_KMS_RW,
+bindRolePolicyStatements(roleConfig, STATEMENT_ALLOW_KMS_RW,
statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS),
new Statement(Effects.Deny)
.addActions(S3_ALL_GET)
@@ -56,6 +56,7 @@
import static org.apache.hadoop.fs.s3a.auth.RoleModel.Effects;
import static org.apache.hadoop.fs.s3a.auth.RoleModel.Statement;
import static org.apache.hadoop.fs.s3a.auth.RoleModel.directory;
+import static org.apache.hadoop.fs.s3a.auth.RoleModel.resource;
import static org.apache.hadoop.fs.s3a.auth.RoleModel.statement;
import static org.apache.hadoop.fs.s3a.auth.RolePolicies.*;
import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.bindRolePolicyStatements;
@@ -144,6 +145,11 @@ public class ITestPartialRenamesDeletes extends AbstractS3ATestBase {
*/
private Path writableDir;

+/**
+ * Instruction file created when using CSE, required to be added to policies.
+ */
+private Path writableDirInstructionFile;

/**
* A directory to which restricted roles have only read access.
*/
@@ -216,6 +222,7 @@ public void setup() throws Exception {
basePath = uniquePath();
readOnlyDir = new Path(basePath, "readonlyDir");
writableDir = new Path(basePath, "writableDir");
+writableDirInstructionFile = new Path(basePath, "writableDir.instruction");
readOnlyChild = new Path(readOnlyDir, "child");
noReadDir = new Path(basePath, "noReadDir");
// the full FS
@@ -225,8 +232,7 @@

// create the baseline assumed role
assumedRoleConfig = createAssumedRoleConfig();
-bindRolePolicyStatements(assumedRoleConfig,
-STATEMENT_ALLOW_SSE_KMS_RW,
+bindRolePolicyStatements(assumedRoleConfig, STATEMENT_ALLOW_KMS_RW,
STATEMENT_ALL_BUCKET_READ_ACCESS, // root: r-x
new Statement(Effects.Allow) // dest: rwx
.addActions(S3_PATH_RW_OPERATIONS)
@@ -365,13 +371,13 @@ public void testMultiDeleteOptionPropagated() throws Throwable {
public void testRenameParentPathNotWriteable() throws Throwable {
describe("rename with parent paths not writeable; multi=%s", multiDelete);
final Configuration conf = createAssumedRoleConfig();
-bindRolePolicyStatements(conf,
-STATEMENT_ALLOW_SSE_KMS_RW,
+bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
STATEMENT_ALL_BUCKET_READ_ACCESS,
new Statement(Effects.Allow)
.addActions(S3_PATH_RW_OPERATIONS)
.addResources(directory(readOnlyDir))
-.addResources(directory(writableDir)));
+.addResources(directory(writableDir))
+.addResources(resource(writableDirInstructionFile, false, false)));
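
Why the extra resource (an assumption about CSE behavior, not spelled out in this diff): an encryption client configured for instruction files stores per-object encryption metadata in a sibling "<name>.instruction" object, so a rename into writableDir may also write writableDir.instruction, and a policy scoped to directory(writableDir) alone would reject that companion write. A hedged sketch of the complete allow statement this builds:

// Allow R/W on the destination tree plus the single companion object.
// Assumption: resource(path, isDirectory, addWildcard) with both flags
// false yields the ARN of exactly that one object.
new Statement(Effects.Allow)
    .addActions(S3_PATH_RW_OPERATIONS)
    .addResources(directory(writableDir))
    .addResources(resource(writableDirInstructionFile, false, false));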
roleFS = (S3AFileSystem) readOnlyDir.getFileSystem(conf);

S3AFileSystem fs = getFileSystem();
@@ -733,8 +739,7 @@ public void testRenamePermissionRequirements() throws Throwable {
// s3:DeleteObjectVersion permission, and attempt rename
// and then delete.
Configuration roleConfig = createAssumedRoleConfig();
-bindRolePolicyStatements(roleConfig,
-STATEMENT_ALLOW_SSE_KMS_RW,
+bindRolePolicyStatements(roleConfig, STATEMENT_ALLOW_KMS_RW,
STATEMENT_ALL_BUCKET_READ_ACCESS, // root: r-x
new Statement(Effects.Allow) // dest: rwx
.addActions(S3_PATH_RW_OPERATIONS)
@@ -70,6 +70,7 @@ public void testLandsatBucketRequireGuarded() throws Throwable {

@Test
public void testLandsatBucketRequireUnencrypted() throws Throwable {
+skipIfClientSideEncryption();
run(BucketInfo.NAME,
"-" + BucketInfo.ENCRYPTION_FLAG, "none",
getLandsatCSVFile(getConfiguration()));
@@ -178,8 +179,9 @@ public void testUploadListByAge() throws Throwable {
// least a second old
describe("Sleeping 1 second then confirming upload still there");
Thread.sleep(1000);
-LambdaTestUtils.eventually(5000, 1000,
-() -> { assertNumUploadsAge(path, 1, 1); });
+LambdaTestUtils.eventually(5000, 1000, () -> {
+assertNumUploadsAge(path, 1, 1);
+});

// 7. Assert deletion works when age filter matches
describe("Doing aged deletion");
@@ -231,8 +233,8 @@ private void assertNumDeleted(S3AFileSystem fs, Path path, int numDeleted)
* search all parts
* @throws Exception on failure
*/
-private void uploadCommandAssertCount(S3AFileSystem fs, String options[],
-Path path, int numUploads, int ageSeconds)
+private void uploadCommandAssertCount(S3AFileSystem fs, String[] options, Path path,
+int numUploads, int ageSeconds)
throws Exception {
List<String> allOptions = new ArrayList<>();
List<String> output = new ArrayList<>();