diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index fc5e055232..aac568a776 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -10,7 +10,7 @@ on:
jobs:
build-federation-image:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -34,7 +34,7 @@ jobs:
cache-to: type=gha,mode=max,scope=federation
build-image:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index 2bd91d14ee..d232cf27e6 100644
--- a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -125,7 +125,7 @@ jobs:
if: always()
build:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -158,7 +158,7 @@ jobs:
cache-to: type=gha,mode=max,scope=pykmip
build-federation-image:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
diff --git a/config.json b/config.json
index 565c191226..3d3d9dd9d3 100644
--- a/config.json
+++ b/config.json
@@ -88,7 +88,9 @@
}
],
"defaultEncryptionKeyPerAccount": true,
+ "kmsHideScalityArn": false,
"kmsAWS": {
+ "providerName": "aws",
"region": "us-east-1",
"endpoint": "http://127.0.0.1:8080",
"ak": "tbd",
diff --git a/lib/Config.js b/lib/Config.js
index 6f0c39c78b..9892811f65 100644
--- a/lib/Config.js
+++ b/lib/Config.js
@@ -17,6 +17,13 @@ const { azureAccountNameRegex, base64Regex,
} = require('../constants');
const { utapiVersion } = require('utapi');
const { versioning } = require('arsenal');
+const {
+ KmsType,
+ KmsProtocol,
+ isValidProvider,
+ isValidType,
+ isValidProtocol,
+} = require('arsenal/build/lib/network/KMSInterface');
const versionIdUtils = versioning.VersionID;
@@ -401,8 +408,9 @@ class Config extends EventEmitter {
// Read config automatically
this._getLocationConfig();
- this._getConfig();
+ const config = this._getConfig();
this._configureBackends();
+ this._sseMigration(config);
}
_parseKmsAWS(config) {
@@ -411,13 +419,19 @@ class Config extends EventEmitter {
}
let kmsAWS = {};
- const { region, endpoint, ak, sk, tls } = config.kmsAWS;
+ const { providerName, region, endpoint, ak, sk, tls, noAwsArn } = config.kmsAWS;
+ assert(providerName, 'Configuration Error: providerName must be defined in kmsAWS');
+ assert(isValidProvider(providerName),
+ 'Configuration Error: kmsAWS.providerName must be lowercase alphanumeric only');
assert(endpoint, 'Configuration Error: endpoint must be defined in kmsAWS');
assert(ak, 'Configuration Error: ak must be defined in kmsAWS');
assert(sk, 'Configuration Error: sk must be defined in kmsAWS');
+ assert(['undefined', 'boolean'].some(type => type === typeof noAwsArn),
+ 'Configuration Error: kmsAWS.noAwsArn must be a boolean or not set');
kmsAWS = {
+ providerName,
endpoint,
ak,
sk,
@@ -427,6 +441,10 @@ class Config extends EventEmitter {
kmsAWS.region = region;
}
+ if (noAwsArn) {
+ kmsAWS.noAwsArn = noAwsArn;
+ }
+
if (tls) {
kmsAWS.tls = {};
if (tls.rejectUnauthorized !== undefined) {
@@ -1095,8 +1113,12 @@ class Config extends EventEmitter {
this.kms = {};
if (config.kms) {
+ assert(config.kms.providerName, 'config.kms.providerName must be provided');
+ assert(isValidProvider(config.kms.providerName),
+ 'config.kms.providerName must be lowercase alphanumeric only');
assert(typeof config.kms.userName === 'string');
assert(typeof config.kms.password === 'string');
+ this.kms.providerName = config.kms.providerName;
this.kms.userName = config.kms.userName;
this.kms.password = config.kms.password;
if (config.kms.helperProgram !== undefined) {
@@ -1163,6 +1185,10 @@ class Config extends EventEmitter {
},
};
if (config.kmip) {
+ assert(config.kmip.providerName, 'config.kmip.providerName must be defined');
+ assert(isValidProvider(config.kmip.providerName),
+ 'config.kmip.providerName must be lowercase alphanumeric only');
+ this.kmip.providerName = config.kmip.providerName;
if (config.kmip.client) {
if (config.kmip.client.compoundCreateActivate) {
assert(typeof config.kmip.client.compoundCreateActivate ===
@@ -1229,6 +1255,11 @@ class Config extends EventEmitter {
assert(typeof this.defaultEncryptionKeyPerAccount === 'boolean',
'config.defaultEncryptionKeyPerAccount must be a boolean');
+ this.kmsHideScalityArn = Object.hasOwnProperty.call(config, 'kmsHideScalityArn')
+ ? config.kmsHideScalityArn
+ : true; // By default hide scality arn to keep backward compatibility and simplicity
+ assert.strictEqual(typeof this.kmsHideScalityArn, 'boolean');
+
this.healthChecks = defaultHealthChecks;
if (config.healthChecks && config.healthChecks.allowFrom) {
assert(config.healthChecks.allowFrom instanceof Array,
@@ -1410,6 +1441,7 @@ class Config extends EventEmitter {
}
this.lifecycleRoleName = config.lifecycleRoleName || null;
+ return config;
}
_configureBackends() {
@@ -1485,6 +1517,61 @@ class Config extends EventEmitter {
};
}
+ _sseMigration(config) {
+ if (config.sseMigration) {
+ /**
+ * For data that was encrypted internally by default and a new external provider is setup.
+ * This config helps detect the existing encryption key to decrypt with the good provider.
+ * The key format will be migrated automatically on GET/HEADs to include provider details.
+ */
+ this.sseMigration = {};
+ const { previousKeyType, previousKeyProtocol, previousKeyProvider } = config.sseMigration;
+ if (!previousKeyType) {
+ assert.fail(
+ 'NotImplemented: No dynamic KMS key migration. Set sseMigration.previousKeyType');
+ }
+
+ // If previousKeyType is provided it's used as static value to migrate the format of the key
+ // without additional dynamic evaluation if the key provider is unknown.
+ assert(isValidType(previousKeyType),
+ 'sseMigration.previousKeyType must be "internal" or "external"');
+ this.sseMigration.previousKeyType = previousKeyType;
+
+ let expectedProtocol;
+ if (previousKeyType === KmsType.internal) {
+ // For internal key type default protocol is file and provider is scality
+ this.sseMigration.previousKeyProtocol = previousKeyProtocol || KmsProtocol.file;
+ this.sseMigration.previousKeyProvider = previousKeyProvider || 'scality';
+ expectedProtocol = [KmsProtocol.scality, KmsProtocol.mem, KmsProtocol.file];
+ } else if (previousKeyType === KmsType.external) {
+ // No defaults allowed for external provider
+ assert(previousKeyProtocol,
+ 'sseMigration.previousKeyProtocol must be defined for external provider');
+ this.sseMigration.previousKeyProtocol = previousKeyProtocol;
+ assert(previousKeyProvider,
+ 'sseMigration.previousKeyProvider must be defined for external provider');
+ this.sseMigration.previousKeyProvider = previousKeyProvider;
+ expectedProtocol = [KmsProtocol.kmip, KmsProtocol.aws_kms];
+ }
+
+ assert(isValidProtocol(previousKeyType, this.sseMigration.previousKeyProtocol),
+ `sseMigration.previousKeyProtocol must be one of ${expectedProtocol}`);
+ assert(isValidProvider(this.sseMigration.previousKeyProvider),
+ 'sseMigration.previousKeyProvider must be lowercase alphanumeric only');
+
+ if (this.sseMigration.previousKeyType === KmsType.external) {
+ if ([KmsProtocol.file, KmsProtocol.mem].includes(this.backends.kms)) {
+ assert.fail(
+ `sseMigration.previousKeyType "external" can't migrate to "internal" KMS provider ${
+ this.backends.kms}`
+ );
+ }
+ // We'd have to compare protocol & providerName
+ assert.fail('sseMigration.previousKeyType "external" is not yet available');
+ }
+ }
+ }
+
_verifyRedisPassword(password) {
return typeof password === 'string';
}
diff --git a/lib/api/apiUtils/bucket/bucketEncryption.js b/lib/api/apiUtils/bucket/bucketEncryption.js
index 0657484691..252d584034 100644
--- a/lib/api/apiUtils/bucket/bucketEncryption.js
+++ b/lib/api/apiUtils/bucket/bucketEncryption.js
@@ -2,6 +2,7 @@ const { errors } = require('arsenal');
const metadata = require('../../../metadata/wrapper');
const kms = require('../../../kms/wrapper');
const { parseString } = require('xml2js');
+const { isScalityKmsArn } = require('arsenal/build/lib/network/KMSInterface');
/**
* ServerSideEncryptionInfo - user configuration for server side encryption
@@ -95,6 +96,12 @@ function parseEncryptionXml(xml, log, cb) {
}
result.configuredMasterKeyId = encConfig.KMSMasterKeyID[0];
+ // If key is not in a scality arn format include a scality arn prefix
+ // of the currently selected KMS client.
+ // To keep track of KMS type, protocol and provider used
+ if (!isScalityKmsArn(result.configuredMasterKeyId)) {
+ result.configuredMasterKeyId = `${kms.arnPrefix}${result.configuredMasterKeyId}`;
+ }
}
return cb(null, result);
});
@@ -119,7 +126,12 @@ function hydrateEncryptionConfig(algorithm, configuredMasterKeyId, mandatory = n
const sseConfig = { algorithm, mandatory };
if (algorithm === 'aws:kms' && configuredMasterKeyId) {
- sseConfig.configuredMasterKeyId = configuredMasterKeyId;
+ // If key is not in a scality arn format include a scality arn prefix
+ // of the currently selected KMS client.
+ // To keep track of KMS type, protocol and provider used
+ sseConfig.configuredMasterKeyId = isScalityKmsArn(configuredMasterKeyId)
+ ? configuredMasterKeyId
+ : `${kms.arnPrefix}${configuredMasterKeyId}`;
}
if (mandatory !== null) {
diff --git a/lib/api/apiUtils/bucket/updateEncryption.js b/lib/api/apiUtils/bucket/updateEncryption.js
new file mode 100644
index 0000000000..5db3152423
--- /dev/null
+++ b/lib/api/apiUtils/bucket/updateEncryption.js
@@ -0,0 +1,141 @@
+const { getVersionSpecificMetadataOptions } = require('../object/versioning');
+// const getReplicationInfo = require('../object/getReplicationInfo');
+const { config } = require('../../../Config');
+const kms = require('../../../kms/wrapper');
+const metadata = require('../../../metadata/wrapper');
+const { isScalityKmsArn, makeScalityArnPrefix } = require('arsenal/build/lib/network/KMSInterface');
+
+// Bucket needs a key from the new KMS, not a simple reformatting
+function updateBucketEncryption(bucket, log, cb) {
+ const sse = bucket.getServerSideEncryption();
+
+ if (!sse) {
+ return cb(null, bucket);
+ }
+
+ const masterKey = sse.masterKeyId;
+ const configuredKey = sse.configuredMasterKeyId;
+
+ // Note: if migration is from an external to an external, absence of arn is not enough
+ // a comparison of arn will be necessary but config validation blocks this for now
+ const updateMaster = masterKey && !isScalityKmsArn(masterKey);
+ const updateConfigured = configuredKey && !isScalityKmsArn(configuredKey);
+
+ if (!updateMaster && !updateConfigured) {
+ return cb(null, bucket);
+ }
+ log.debug('trying to update bucket encryption', { oldKey: masterKey || configuredKey });
+ // this should trigger vault account key update as well
+ return kms.createBucketKey(bucket, log, (err, newSse) => {
+ if (err) {
+ return cb(err, bucket);
+ }
+ // if both keys need migration, it is ok to use the same KMS key
+ // as the configured one should be used and the only way to use the
+ // masterKeyId is to PutBucketEncryption to AES256 but then nothing
+ // will break and the same KMS key will continue to be used.
+ // And the key is managed (created) by Scality, not passed from input.
+ if (updateMaster) {
+ sse.masterKeyId = newSse.masterKeyArn;
+ }
+ if (updateConfigured) {
+ sse.configuredMasterKeyId = newSse.masterKeyArn;
+ }
+ // KMS account key will not be deleted when bucket is deleted
+ if (newSse.isAccountEncryptionEnabled) {
+ sse.isAccountEncryptionEnabled = newSse.isAccountEncryptionEnabled;
+ }
+
+ log.info('updating bucket encryption', {
+ oldKey: masterKey || configuredKey,
+ newKey: newSse.masterKeyArn,
+ isAccount: newSse.isAccountEncryptionEnabled,
+ });
+ return metadata.updateBucket(bucket.getName(), bucket, log, err => cb(err, bucket));
+ });
+}
+
+// Only reformat the key, don't generate a new one.
+// Use opts.skipObjectUpdate to only prepare objMD without sending the update to metadata
+// if a metadata.putObjectMD is expected later in call flow. (Downside: update skipped if error)
+function updateObjectEncryption(bucket, objMD, objectKey, log, keyArnPrefix, opts, cb) {
+ if (!objMD) {
+ return cb(null, bucket, objMD);
+ }
+
+ const key = objMD['x-amz-server-side-encryption-aws-kms-key-id'];
+
+ if (!key || isScalityKmsArn(key)) {
+ return cb(null, bucket, objMD);
+ }
+ const newKey = `${keyArnPrefix}${key}`;
+ // eslint-disable-next-line no-param-reassign
+ objMD['x-amz-server-side-encryption-aws-kms-key-id'] = newKey;
+ // Doesn't seem to be used but update as well
+ for (const dataLocator of objMD.location || []) {
+ if (dataLocator.masterKeyId) {
+ dataLocator.masterKeyId = `${keyArnPrefix}${dataLocator.masterKeyId}`;
+ }
+ }
+ // eslint-disable-next-line no-param-reassign
+ objMD.originOp = 's3:ObjectCreated:Copy';
+ // Copy should be tested for 9.5 in INTGR-1038
+ // to make sure it does not impact backbeat CRR / bucket notif
+ const params = getVersionSpecificMetadataOptions(objMD, config.nullVersionCompatMode);
+
+ log.info('reformatting object encryption key', { oldKey: key, newKey, skipUpdate: opts.skipObjectUpdate });
+ if (opts.skipObjectUpdate) {
+ return cb(null, bucket, objMD);
+ }
+ return metadata.putObjectMD(bucket.getName(), objectKey, objMD, params,
+ log, err => cb(err, bucket, objMD));
+}
+
+/**
+ * Update encryption of bucket and object if kms provider changed
+ *
+ * @param {Error} err - error coming from metadata validate before the action handling
+ * @param {BucketInfo} bucket - bucket
+ * @param {Object} [objMD] - object metadata
+ * @param {string} objectKey - objectKey from request.
+ * @param {Logger} log - request logger
+ * @param {Object} opts - options for sseMigration
+ * @param {boolean} [opts.skipObject] - ignore object update
+ * @param {boolean} [opts.skipObjectUpdate] - don't update metadata but prepare objMD for later update
+ * @param {Function} cb - callback (err, bucket, objMD)
+ * @returns {undefined}
+ */
+function updateEncryption(err, bucket, objMD, objectKey, log, opts, cb) {
+ // Error passed here to call the function in between the metadataValidate and its callback
+ if (err) {
+ return cb(err);
+ }
+ // if objMD missing, still try updateBucketEncryption
+ if (!config.sseMigration) {
+ return cb(null, bucket, objMD);
+ }
+
+ const { previousKeyType, previousKeyProtocol, previousKeyProvider } = config.sseMigration;
+ // previousKeyType is required and validated in Config.js
+ // for now it is the only implementation we need.
+ // See TAD Seamless decryption with internal and external KMS: https://scality.atlassian.net/wiki/x/EgADu
+ // for other method of migration without a previousKeyType
+
+ const keyArnPrefix = makeScalityArnPrefix(previousKeyType, previousKeyProtocol, previousKeyProvider);
+
+ return updateBucketEncryption(bucket, log, (err, bucket) => {
+ // Any error in updating encryption at bucket or object level is returned to client.
+ // Other possibilities: ignore error, include sse migration notice in error message.
+ if (err) {
+ return cb(err, bucket, objMD);
+ }
+ if (opts.skipObject) {
+ return cb(err, bucket, objMD);
+ }
+ return updateObjectEncryption(bucket, objMD, objectKey, log, keyArnPrefix, opts, cb);
+ });
+}
+
+module.exports = {
+ updateEncryption,
+};
diff --git a/lib/api/apiUtils/object/sseHeaders.js b/lib/api/apiUtils/object/sseHeaders.js
new file mode 100644
index 0000000000..8ed85a828e
--- /dev/null
+++ b/lib/api/apiUtils/object/sseHeaders.js
@@ -0,0 +1,18 @@
+const { config } = require('../../../Config');
+const { getKeyIdFromArn } = require('arsenal/build/lib/network/KMSInterface');
+
+function setSSEHeaders(headers, algo, kmsKey) {
+ if (algo) {
+ // eslint-disable-next-line no-param-reassign
+ headers['x-amz-server-side-encryption'] = algo;
+ if (kmsKey && algo === 'aws:kms') {
+ // eslint-disable-next-line no-param-reassign
+ headers['x-amz-server-side-encryption-aws-kms-key-id'] =
+ config.kmsHideScalityArn ? getKeyIdFromArn(kmsKey) : kmsKey;
+ }
+ }
+}
+
+module.exports = {
+ setSSEHeaders,
+};
diff --git a/lib/api/bucketGetEncryption.js b/lib/api/bucketGetEncryption.js
index 8720b69b4d..78e4be235f 100644
--- a/lib/api/bucketGetEncryption.js
+++ b/lib/api/bucketGetEncryption.js
@@ -6,6 +6,8 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const escapeForXml = s3middleware.escapeForXml;
+const { config } = require('../Config');
+const { getKeyIdFromArn } = require('arsenal/build/lib/network/KMSInterface');
/**
* Bucket Get Encryption - Get bucket SSE configuration
@@ -60,7 +62,11 @@ function bucketGetEncryption(authInfo, request, log, callback) {
];
if (sseInfo.configuredMasterKeyId) {
- xml.push(`${escapeForXml(sseInfo.configuredMasterKeyId)}`);
+ xml.push(`${escapeForXml(
+ config.kmsHideScalityArn
+ ? getKeyIdFromArn(sseInfo.configuredMasterKeyId)
+ : sseInfo.configuredMasterKeyId
+ )}`);
}
xml.push(
diff --git a/lib/api/completeMultipartUpload.js b/lib/api/completeMultipartUpload.js
index e728b2d269..dcf7b84cd1 100644
--- a/lib/api/completeMultipartUpload.js
+++ b/lib/api/completeMultipartUpload.js
@@ -22,6 +22,7 @@ const locationConstraintCheck
const locationKeysHaveChanged
= require('./apiUtils/object/locationKeysHaveChanged');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
+const { setSSEHeaders } = require('./apiUtils/object/sseHeaders');
const logger = require('../utilities/logger');
@@ -312,14 +313,15 @@ function completeMultipartUpload(authInfo, request, log, callback) {
if (storedMetadata.legalHold) {
metaStoreParams.legalHold = storedMetadata.legalHold;
}
- const serverSideEncryption =
- destBucket.getServerSideEncryption();
+ const serverSideEncryption = storedMetadata['x-amz-server-side-encryption'];
let pseudoCipherBundle = null;
if (serverSideEncryption) {
+ const kmsKey = storedMetadata['x-amz-server-side-encryption-aws-kms-key-id'];
pseudoCipherBundle = {
- algorithm: destBucket.getSseAlgorithm(),
- masterKeyId: destBucket.getSseMasterKeyId(),
+ algorithm: serverSideEncryption,
+ masterKeyId: kmsKey,
};
+ setSSEHeaders(responseHeaders, serverSideEncryption, kmsKey);
}
return versioningPreprocessing(bucketName,
destBucket, objectKey, objMD, log, (err, options) => {
diff --git a/lib/api/initiateMultipartUpload.js b/lib/api/initiateMultipartUpload.js
index 2973c790f4..f50f8948aa 100644
--- a/lib/api/initiateMultipartUpload.js
+++ b/lib/api/initiateMultipartUpload.js
@@ -20,6 +20,8 @@ const { validateHeaders, compareObjectLockInformation } =
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
const { data } = require('../data/wrapper');
+const { setSSEHeaders } = require('./apiUtils/object/sseHeaders');
+const { updateEncryption } = require('./apiUtils/bucket/updateEncryption');
/*
Sample xml response:
@@ -179,6 +181,10 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
},
});
+ setSSEHeaders(corsHeaders,
+ mpuMD['x-amz-server-side-encryption'],
+ mpuMD['x-amz-server-side-encryption-aws-kms-key-id']);
+
monitoring.promMetrics('PUT', bucketName, '200',
'initiateMultipartUpload');
return callback(null, xml, corsHeaders);
@@ -189,9 +195,13 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
function _storetheMPObject(destinationBucket, corsHeaders, serverSideEncryption) {
let cipherBundle = null;
if (serverSideEncryption) {
+ const { algorithm, configuredMasterKeyId, masterKeyId } = serverSideEncryption;
+ if (configuredMasterKeyId) {
+ log.debug('using user configured kms master key id');
+ }
cipherBundle = {
- algorithm: serverSideEncryption.algorithm,
- masterKeyId: serverSideEncryption.masterKeyId,
+ algorithm,
+ masterKeyId: configuredMasterKeyId || masterKeyId,
};
}
const backendInfoObj = locationConstraintCheck(request, null,
@@ -263,6 +273,8 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
async.waterfall([
next => metadataValidateBucketAndObj(metadataValParams, log,
+ (error, destinationBucket, destObjMD) =>
+ updateEncryption(error, destinationBucket, destObjMD, objectKey, log, { skipObject: true },
(error, destinationBucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, destinationBucket);
@@ -275,7 +287,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
return next(error, corsHeaders);
}
return next(null, corsHeaders, destinationBucket);
- }),
+ })),
(corsHeaders, destinationBucket, next) => {
if (destinationBucket.hasDeletedFlag() && accountCanonicalID !== destinationBucket.getOwner()) {
log.trace('deleted flag on bucket and request from non-owner account');
diff --git a/lib/api/objectCopy.js b/lib/api/objectCopy.js
index 3171114064..2190e116ae 100644
--- a/lib/api/objectCopy.js
+++ b/lib/api/objectCopy.js
@@ -23,6 +23,8 @@ const { config } = require('../Config');
const monitoring = require('../utilities/metrics');
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
+const { setSSEHeaders } = require('./apiUtils/object/sseHeaders');
+const { updateEncryption } = require('./apiUtils/bucket/updateEncryption');
const versionIdUtils = versioning.VersionID;
const locationHeader = constants.objectLocationConstraintHeader;
@@ -250,6 +252,8 @@ function objectCopy(authInfo, request, sourceBucket,
return async.waterfall([
function checkDestAuth(next) {
return metadataValidateBucketAndObj(valPutParams, log,
+ (err, destBucketMD, destObjMD) =>
+ updateEncryption(err, destBucketMD, destObjMD, destObjectKey, log, { skipObject: true },
(err, destBucketMD, destObjMD) => {
if (err) {
log.debug('error validating put part of request',
@@ -264,7 +268,7 @@ function objectCopy(authInfo, request, sourceBucket,
return next(errors.NoSuchBucket);
}
return next(null, destBucketMD, destObjMD);
- });
+ }));
},
function checkSourceAuthorization(destBucketMD, destObjMD, next) {
return metadataValidateBucketAndObj(valGetParams, log,
@@ -530,13 +534,10 @@ function objectCopy(authInfo, request, sourceBucket,
].join('');
const additionalHeaders = corsHeaders || {};
if (serverSideEncryption) {
- additionalHeaders['x-amz-server-side-encryption'] =
- serverSideEncryption.algorithm;
- if (serverSideEncryption.algorithm === 'aws:kms') {
- additionalHeaders[
- 'x-amz-server-side-encryption-aws-kms-key-id'] =
- serverSideEncryption.masterKeyId;
- }
+ setSSEHeaders(additionalHeaders,
+ serverSideEncryption.algorithm,
+ serverSideEncryption.configuredMasterKeyId || serverSideEncryption.masterKeyId
+ );
}
if (sourceVersionId) {
additionalHeaders['x-amz-copy-source-version-id'] =
diff --git a/lib/api/objectGet.js b/lib/api/objectGet.js
index f36a177051..ae4a77214d 100644
--- a/lib/api/objectGet.js
+++ b/lib/api/objectGet.js
@@ -13,6 +13,7 @@ const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const monitoring = require('../utilities/metrics');
const { getPartCountFromMd5 } = require('./apiUtils/object/partInfo');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
+const { updateEncryption } = require('./apiUtils/bucket/updateEncryption');
const validateHeaders = s3middleware.validateConditionalHeaders;
@@ -51,6 +52,7 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
};
return metadataValidateBucketAndObj(mdValParams, log,
+ (err, bucket, objMD) => updateEncryption(err, bucket, objMD, objectKey, log, {},
(err, bucket, objMD) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
@@ -255,7 +257,7 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
return callback(null, dataLocator, responseMetaHeaders,
byteRange);
});
- });
+ }));
}
module.exports = objectGet;
diff --git a/lib/api/objectHead.js b/lib/api/objectHead.js
index 01420d8443..eeafa99cbf 100644
--- a/lib/api/objectHead.js
+++ b/lib/api/objectHead.js
@@ -14,6 +14,7 @@ const { getPartNumber, getPartSize, getPartCountFromMd5 } =
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { maximumAllowedPartCount } = require('../../constants');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
+const { updateEncryption } = require('./apiUtils/bucket/updateEncryption');
/**
* HEAD Object - Same as Get Object but only respond with headers
@@ -51,6 +52,7 @@ function objectHead(authInfo, request, log, callback) {
};
return metadataValidateBucketAndObj(mdValParams, log,
+ (err, bucket, objMD) => updateEncryption(err, bucket, objMD, objectKey, log, {},
(err, bucket, objMD) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
@@ -160,7 +162,7 @@ function objectHead(authInfo, request, log, callback) {
});
monitoring.promMetrics('HEAD', bucketName, '200', 'headObject');
return callback(null, responseHeaders);
- });
+ }));
}
module.exports = objectHead;
diff --git a/lib/api/objectPut.js b/lib/api/objectPut.js
index 12fdf5b1d5..94a51c0df3 100644
--- a/lib/api/objectPut.js
+++ b/lib/api/objectPut.js
@@ -19,6 +19,7 @@ const validateChecksumHeaders = require('./apiUtils/object/validateChecksumHeade
const writeContinue = require('../utilities/writeContinue');
const versionIdUtils = versioning.VersionID;
+const { updateEncryption } = require('./apiUtils/bucket/updateEncryption');
/**
* PUT Object in the requested bucket. Steps include:
@@ -79,6 +80,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
log.trace('owner canonicalID to send to data', { canonicalID });
return metadataValidateBucketAndObj(valParams, log,
+ (err, bucket, objMD) => updateEncryption(err, bucket, objMD, objectKey, log, { skipObject: true },
(err, bucket, objMD) => {
const responseHeaders = collectCorsHeaders(headers.origin,
method, bucket);
@@ -202,7 +204,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
null, ingestSize);
return callback(null, responseHeaders);
});
- });
+ }));
}
module.exports = objectPut;
diff --git a/lib/api/objectPutCopyPart.js b/lib/api/objectPutCopyPart.js
index c423cc556b..058627fddf 100644
--- a/lib/api/objectPutCopyPart.js
+++ b/lib/api/objectPutCopyPart.js
@@ -14,6 +14,7 @@ const services = require('../services');
const setUpCopyLocator = require('./apiUtils/object/setUpCopyLocator');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const monitoring = require('../utilities/metrics');
+const { setSSEHeaders } = require('./apiUtils/object/sseHeaders');
const versionIdUtils = versioning.VersionID;
const { config } = require('../Config');
@@ -238,9 +239,14 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
}
const destObjLocationConstraint =
res.controllingLocationConstraint;
+ const sseAlgo = res['x-amz-server-side-encryption'];
+ const sse = sseAlgo ? {
+ algorithm: sseAlgo,
+ masterKeyId: res['x-amz-server-side-encryption-aws-kms-key-id'],
+ } : null;
return next(null, dataLocator, destBucketMD,
destObjLocationConstraint, copyObjectSize,
- sourceVerId, sourceLocationConstraintName, splitter);
+ sourceVerId, sourceLocationConstraintName, sse, splitter);
});
},
function goGetData(
@@ -250,6 +256,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
copyObjectSize,
sourceVerId,
sourceLocationConstraintName,
+ sse,
splitter,
next,
) {
@@ -262,6 +269,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
dataLocator,
dataStoreContext,
locationConstraintCheck,
+ sse,
(error, eTag, lastModified, serverSideEncryption, locations) => {
if (error) {
if (error.message === 'skip') {
@@ -415,12 +423,9 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
const additionalHeaders = corsHeaders || {};
if (serverSideEncryption) {
- additionalHeaders['x-amz-server-side-encryption'] =
- serverSideEncryption.algorithm;
- if (serverSideEncryption.algorithm === 'aws:kms') {
- additionalHeaders['x-amz-server-side-encryption-aws-kms-key-id']
- = serverSideEncryption.masterKeyId;
- }
+ setSSEHeaders(additionalHeaders,
+ serverSideEncryption.algorithm,
+ serverSideEncryption.masterKeyId);
}
additionalHeaders['x-amz-copy-source-version-id'] = sourceVerId;
pushMetric('uploadPartCopy', log, {
diff --git a/lib/api/objectPutPart.js b/lib/api/objectPutPart.js
index edb255c0d8..d7074ba77f 100644
--- a/lib/api/objectPutPart.js
+++ b/lib/api/objectPutPart.js
@@ -18,8 +18,9 @@ const locationConstraintCheck
= require('./apiUtils/object/locationConstraintCheck');
const monitoring = require('../utilities/metrics');
const writeContinue = require('../utilities/writeContinue');
-const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
+const { parseObjectEncryptionHeaders } = require('./apiUtils/bucket/bucketEncryption');
const validateChecksumHeaders = require('./apiUtils/object/validateChecksumHeaders');
+const { setSSEHeaders } = require('./apiUtils/object/sseHeaders');
const skipError = new Error('skip');
@@ -129,29 +130,21 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
}
return next(null, destinationBucket);
},
- // Get bucket server-side encryption, if it exists.
- (destinationBucket, next) => getObjectSSEConfiguration(
- request.headers, destinationBucket, log,
- (err, sseConfig) => next(err, destinationBucket, sseConfig)),
- (destinationBucket, encryption, next) => {
- // If bucket has server-side encryption, pass the `res` value
- if (encryption) {
- return kms.createCipherBundle(encryption, log, (err, res) => {
- if (err) {
- log.error('error processing the cipher bundle for ' +
- 'the destination bucket', {
- error: err,
- });
- return next(err, destinationBucket);
- }
- return next(null, destinationBucket, res);
- });
+ // Validate that no object SSE is provided for part.
+ // Part must use SSE from initiateMPU (overview in metadata)
+ (destinationBucket, next) => {
+ const { error, objectSSE } = parseObjectEncryptionHeaders(request.headers);
+ if (error) {
+ return next(error, destinationBucket);
+ }
+ if (objectSSE.algorithm) {
+ return next(errors.InvalidArgument.customizeDescription(
+ 'x-amz-server-side-encryption header is not supported for this operation.'));
}
- // The bucket does not have server-side encryption, so pass `null`
- return next(null, destinationBucket, null);
+ return next(null, destinationBucket);
},
// Get the MPU shadow bucket.
- (destinationBucket, cipherBundle, next) =>
+ (destinationBucket, next) =>
metadata.getBucket(mpuBucketName, log,
(err, mpuBucket) => {
if (err && err.is.NoSuchBucket) {
@@ -169,10 +162,10 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
if (mpuBucket.getMdBucketModelVersion() < 2) {
splitter = constants.oldSplitter;
}
- return next(null, destinationBucket, cipherBundle, splitter);
+ return next(null, destinationBucket, splitter);
}),
// Check authorization of the MPU shadow bucket.
- (destinationBucket, cipherBundle, splitter, next) => {
+ (destinationBucket, splitter, next) => {
const mpuOverviewKey = _getOverviewKey(splitter, objectKey,
uploadId);
return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, {}, log,
@@ -193,11 +186,36 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
const objectLocationConstraint =
res.controllingLocationConstraint;
+ const sseAlgo = res['x-amz-server-side-encryption'];
+ const sse = sseAlgo ? {
+ algorithm: sseAlgo,
+ masterKeyId: res['x-amz-server-side-encryption-aws-kms-key-id'],
+ } : null;
return next(null, destinationBucket,
objectLocationConstraint,
- cipherBundle, splitter);
+ sse, splitter);
});
},
+ // Use MPU overview SSE config
+ (destinationBucket, objectLocationConstraint, encryption, splitter, next) => {
+ // If MPU has server-side encryption, pass the `res` value
+ if (encryption) {
+ return kms.createCipherBundle(encryption, log, (err, res) => {
+ if (err) {
+ log.error('error processing the cipher bundle for ' +
+ 'the destination bucket', {
+ error: err,
+ });
+ return next(err, destinationBucket);
+ }
+ return next(null, destinationBucket, objectLocationConstraint, res, splitter);
+ // Allow KMS to use a key from previous provider (if sseMigration configured)
+ // Because an ongoing MPU started before sseMigration is not migrated
+ }, { previousOk: true });
+ }
+ // The MPU does not have server-side encryption, so pass `null`
+ return next(null, destinationBucket, objectLocationConstraint, null, splitter);
+ },
// If data backend is backend that handles mpu (like real AWS),
// no need to store part info in metadata
(destinationBucket, objectLocationConstraint, cipherBundle,
@@ -218,6 +236,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
return next(err, destinationBucket);
}
// if data backend handles mpu, skip to end of waterfall
+ // TODO CLDSRV-640 (artesca) data backend should return SSE to include in response headers
if (partInfo && partInfo.dataStoreType === 'aws_s3') {
return next(skipError, destinationBucket,
partInfo.dataStoreETag);
@@ -300,6 +319,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
// Use an array to be consistent with objectPutCopyPart where there
// could be multiple locations.
const partLocations = [dataGetInfo];
+ const sseHeaders = {};
if (cipherBundle) {
const { algorithm, masterKeyId, cryptoScheme,
cipheredDataKey } = cipherBundle;
@@ -307,6 +327,8 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
partLocations[0].sseMasterKeyId = masterKeyId;
partLocations[0].sseCryptoScheme = cryptoScheme;
partLocations[0].sseCipheredDataKey = cipheredDataKey;
+ sseHeaders.algo = algorithm;
+ sseHeaders.kmsKey = masterKeyId;
}
const omVal = {
// back to Version 3 since number-subparts is not needed
@@ -327,14 +349,14 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
return next(err, destinationBucket);
}
return next(null, partLocations, oldLocations, objectLocationConstraint,
- destinationBucket, hexDigest, prevObjectSize, splitter);
+ destinationBucket, hexDigest, sseHeaders, prevObjectSize, splitter);
});
},
(partLocations, oldLocations, objectLocationConstraint, destinationBucket,
- hexDigest, prevObjectSize, splitter, next) => {
+ hexDigest, sseHeaders, prevObjectSize, splitter, next) => {
if (!oldLocations) {
return next(null, oldLocations, objectLocationConstraint,
- destinationBucket, hexDigest, prevObjectSize);
+ destinationBucket, hexDigest, sseHeaders, prevObjectSize);
}
return services.isCompleteMPUInProgress({
bucketName,
@@ -362,13 +384,13 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
oldLocationsToDelete = null;
}
return next(null, oldLocationsToDelete, objectLocationConstraint,
- destinationBucket, hexDigest, prevObjectSize);
+ destinationBucket, hexDigest, sseHeaders, prevObjectSize);
});
},
// Clean up any old data now that new metadata (with new
// data locations) has been stored.
(oldLocationsToDelete, objectLocationConstraint, destinationBucket, hexDigest,
- prevObjectSize, next) => {
+ sseHeaders, prevObjectSize, next) => {
if (oldLocationsToDelete) {
log.trace('overwriting mpu part, deleting data');
const delLog = logger.newRequestLoggerFromSerializedUids(
@@ -383,15 +405,18 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
{ error: err });
}
return next(null, destinationBucket, hexDigest,
- prevObjectSize);
+ sseHeaders, prevObjectSize);
});
}
return next(null, destinationBucket, hexDigest,
- prevObjectSize);
+ sseHeaders, prevObjectSize);
},
- ], (err, destinationBucket, hexDigest, prevObjectSize) => {
+ ], (err, destinationBucket, hexDigest, sseHeaders, prevObjectSize) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, destinationBucket);
+ if (sseHeaders) {
+ setSSEHeaders(corsHeaders, sseHeaders.algo, sseHeaders.kmsKey);
+ }
if (err) {
if (err === skipError) {
return cb(null, hexDigest, corsHeaders);
diff --git a/lib/kms/file/backend.js b/lib/kms/file/backend.js
index c841f9aa82..5f430b21ad 100644
--- a/lib/kms/file/backend.js
+++ b/lib/kms/file/backend.js
@@ -1,10 +1,14 @@
const Common = require('../common');
+const { KmsType, KmsProtocol, getKeyIdFromArn, makeBackend } = require('arsenal/build/lib/network/KMSInterface');
+
+const kmsBackend = makeBackend(KmsType.internal, KmsProtocol.file, 'scality');
const backend = {
/*
* Target implementation will be async. let's mimic it
*/
+ backend: kmsBackend,
/**
*
@@ -19,7 +23,7 @@ const backend = {
// Using createDataKey here for purposes of createBucketKeyMem
// so that we do not need a separate function.
const newKey = Common.createDataKey().toString('hex');
- cb(null, newKey);
+ cb(null, newKey, `${kmsBackend.arnPrefix}${newKey}`);
});
},
@@ -43,7 +47,7 @@ const backend = {
/**
*
* @param {number} cryptoScheme - crypto scheme version number
- * @param {string} masterKeyId - master key; for the file backend
+ * @param {string} masterKeyIdOrArn - master key; for the file backend
* the master key is the actual bucket master key rather than the key to
* retrieve the actual key from a dictionary
* @param {buffer} plainTextDataKey - data key
@@ -53,11 +57,12 @@ const backend = {
* @callback called with (err, cipheredDataKey: Buffer)
*/
cipherDataKey: function cipherDataKeyMem(cryptoScheme,
- masterKeyId,
+ masterKeyIdOrArn,
plainTextDataKey,
log,
cb) {
process.nextTick(() => {
+ const masterKeyId = getKeyIdFromArn(masterKeyIdOrArn);
const masterKey = Buffer.from(masterKeyId, 'hex');
Common.createCipher(
cryptoScheme, masterKey, 0, log,
@@ -84,7 +89,7 @@ const backend = {
/**
*
* @param {number} cryptoScheme - crypto scheme version number
- * @param {string} masterKeyId - master key; for the file backend
+ * @param {string} masterKeyIdOrArn - master key; for the file backend
* the master key is the actual bucket master key rather than the key to
* retrieve the actual key from a dictionary
* @param {buffer} cipheredDataKey - data key
@@ -94,11 +99,12 @@ const backend = {
* @callback called with (err, plainTextDataKey: Buffer)
*/
decipherDataKey: function decipherDataKeyMem(cryptoScheme,
- masterKeyId,
+ masterKeyIdOrArn,
cipheredDataKey,
log,
cb) {
process.nextTick(() => {
+ const masterKeyId = getKeyIdFromArn(masterKeyIdOrArn);
const masterKey = Buffer.from(masterKeyId, 'hex');
Common.createDecipher(
cryptoScheme, masterKey, 0, log,
diff --git a/lib/kms/in_memory/backend.js b/lib/kms/in_memory/backend.js
index 2a694e28b2..609ab17333 100644
--- a/lib/kms/in_memory/backend.js
+++ b/lib/kms/in_memory/backend.js
@@ -1,13 +1,18 @@
const Common = require('../common');
+const { KmsType, KmsProtocol, getKeyIdFromArn, makeBackend } = require('arsenal/build/lib/network/KMSInterface');
const kms = [];
let count = 1;
+const kmsBackend = makeBackend(KmsType.internal, KmsProtocol.mem, 'scality');
+
const backend = {
/*
* Target implementation will be async. let's mimic it
*/
+ backend: kmsBackend,
+
supportsDefaultKeyPerAccount: false,
/**
@@ -23,20 +28,22 @@ const backend = {
// Using createDataKey here for purposes of createBucketKeyMem
// so that we do not need a separate function.
kms[count] = Common.createDataKey();
- cb(null, (count++).toString());
+ const keyId = (count++).toString();
+ cb(null, keyId, `${kmsBackend.arnPrefix}${keyId}`);
});
},
/**
*
- * @param {string} bucketKeyId - the Id of the bucket key
+ * @param {string} bucketKeyIdOrArn - the Id of the bucket key
* @param {object} log - logger object
* @param {function} cb - callback
* @returns {undefined}
* @callback called with (err)
*/
- destroyBucketKey: function destroyBucketKeyMem(bucketKeyId, log, cb) {
+ destroyBucketKey: function destroyBucketKeyMem(bucketKeyIdOrArn, log, cb) {
process.nextTick(() => {
+ const bucketKeyId = getKeyIdFromArn(bucketKeyIdOrArn);
kms[bucketKeyId] = undefined;
cb(null);
});
@@ -45,7 +52,7 @@ const backend = {
/**
*
* @param {number} cryptoScheme - crypto scheme version number
- * @param {string} masterKeyId - key to retrieve master key
+ * @param {string} masterKeyIdOrArn - key to retrieve master key
* @param {buffer} plainTextDataKey - data key
* @param {object} log - logger object
* @param {function} cb - callback
@@ -53,11 +60,12 @@ const backend = {
* @callback called with (err, cipheredDataKey: Buffer)
*/
cipherDataKey: function cipherDataKeyMem(cryptoScheme,
- masterKeyId,
+ masterKeyIdOrArn,
plainTextDataKey,
log,
cb) {
process.nextTick(() => {
+ const masterKeyId = getKeyIdFromArn(masterKeyIdOrArn);
Common.createCipher(
cryptoScheme, kms[masterKeyId], 0, log,
(err, cipher) => {
@@ -83,7 +91,7 @@ const backend = {
/**
*
* @param {number} cryptoScheme - crypto scheme version number
- * @param {string} masterKeyId - key to retrieve master key
+ * @param {string} masterKeyIdOrArn - key to retrieve master key
* @param {buffer} cipheredDataKey - data key
* @param {object} log - logger object
* @param {function} cb - callback
@@ -91,11 +99,12 @@ const backend = {
* @callback called with (err, plainTextDataKey: Buffer)
*/
decipherDataKey: function decipherDataKeyMem(cryptoScheme,
- masterKeyId,
+ masterKeyIdOrArn,
cipheredDataKey,
log,
cb) {
process.nextTick(() => {
+ const masterKeyId = getKeyIdFromArn(masterKeyIdOrArn);
Common.createDecipher(
cryptoScheme, kms[masterKeyId], 0, log,
(err, decipher) => {
diff --git a/lib/kms/wrapper.js b/lib/kms/wrapper.js
index a00e52edb8..d8f161ff82 100644
--- a/lib/kms/wrapper.js
+++ b/lib/kms/wrapper.js
@@ -10,49 +10,156 @@ const KMIPClient = require('arsenal').network.kmipClient;
const { KmsAWSClient } = require('arsenal').network;
const Common = require('./common');
const vault = require('../auth/vault');
-let scalityKMS;
-let scalityKMSImpl;
-try {
- // eslint-disable-next-line import/no-unresolved
- const ScalityKMS = require('scality-kms');
- scalityKMS = new ScalityKMS(config.kms);
- scalityKMSImpl = 'scalityKms';
-} catch (error) {
- logger.warn('scality kms unavailable. ' +
- 'Using file kms backend unless mem specified.',
- { error });
- scalityKMS = file;
- scalityKMSImpl = 'fileKms';
+const {
+ KmsProtocol,
+ makeBackend,
+ isScalityKmsArn,
+ extractDetailFromArn,
+ validateKeyDetail,
+} = require('arsenal/build/lib/network/KMSInterface');
+
+function getScalityKms() {
+ let scalityKMS;
+ let scalityKMSImpl;
+
+ try {
+ // eslint-disable-next-line import/no-unresolved
+ const ScalityKMS = require('scality-kms');
+ scalityKMS = new ScalityKMS(config.kms);
+ scalityKMSImpl = 'scalityKms';
+ } catch (error) {
+ logger.warn('scality kms unavailable. ' +
+ 'Using file kms backend unless mem specified.',
+ { error });
+ scalityKMS = file;
+ scalityKMSImpl = 'fileKms';
+ }
+ return { scalityKMS, scalityKMSImpl };
}
-let client;
-let implName;
-
-if (config.backends.kms === 'mem') {
- client = inMemory;
- implName = 'memoryKms';
-} else if (config.backends.kms === 'file' || config.backends.kms === 'cdmi') {
- client = file;
- implName = 'fileKms';
-} else if (config.backends.kms === 'scality') {
- client = scalityKMS;
- implName = scalityKMSImpl;
-} else if (config.backends.kms === 'kmip') {
- const kmipConfig = { kmip: config.kmip };
- if (!kmipConfig.kmip) {
- throw new Error('KMIP KMS driver configuration is missing.');
+const kmsFactory = {
+ mem: () => ({ client: inMemory, implName: 'memoryKms' }),
+ file: () => ({ client: file, implName: 'fileKms' }),
+ cdmi: () => ({ client: file, implName: 'fileKms' }),
+ scality: () => {
+ const { scalityKMS, scalityKMSImpl } = getScalityKms();
+ return { client: scalityKMS, implName: scalityKMSImpl };
+ },
+ kmip: () => {
+ if (!config.kmip) {
+ throw new Error('KMIP KMS driver configuration is missing.');
+ }
+ return { client: new KMIPClient({ kmip: config.kmip }), implName: 'kmip' };
+ },
+ aws: () => {
+ if (!config.kmsAWS) {
+ throw new Error('AWS KMS driver configuration is missing.');
+ }
+ return { client: new KmsAWSClient({ kmsAWS: config.kmsAWS }), implName: 'aws' };
+ },
+};
+
+function getClient(kms) {
+ const impl = kmsFactory[kms];
+ if (!impl) {
+ throw new Error(`KMS backend is not configured: ${kms}`);
}
- client = new KMIPClient(kmipConfig);
- implName = 'kmip';
-} else if (config.backends.kms === 'aws') {
- const awsConfig = { kmsAWS: config.kmsAWS };
- client = new KmsAWSClient(awsConfig);
- implName = 'aws';
-} else {
- throw new Error(`KMS backend is not configured: ${config.backends.kms}`);
+ return impl();
+}
+
+/**
+ * Note: non-current instances (used for keys from previous providers) are not healthchecked
+ * `{ [`type:protocol:provider`]: clientDetails }`
+ */
+const clientInstances = {};
+
+const { client, implName } = getClient(config.backends.kms);
+const { type, protocol, provider } = client.backend;
+const currentIdentifier = `${type}:${protocol}:${provider}`;
+clientInstances[currentIdentifier] = { client, implName };
+
+const availableBackends = [client.backend];
+
+const mapKmsProtocolToClient = {
+ [KmsProtocol.aws_kms]: 'aws',
+ // others already match
+};
+
+let previousBackend;
+let previousIdentifier;
+
+if (config.sseMigration) {
+ previousBackend = makeBackend(
+ config.sseMigration.previousKeyType,
+ config.sseMigration.previousKeyProtocol,
+ config.sseMigration.previousKeyProvider
+ );
+ availableBackends.push(previousBackend);
+ previousIdentifier = `${previousBackend.type
+ }:${previousBackend.protocol
+ }:${previousBackend.provider}`;
+
+ // Pre instantiate previous backend as for now only internal backend (file) is supported
+ // for future multiple external backend we should consider keeping open connection to
+ // external backend, healthcheck and idle timeout (if migration is finished)
+ // a config flag could help toggle this behavior
+ const previousKms = mapKmsProtocolToClient[previousBackend.protocol] || previousBackend.protocol;
+ const previousInstance = getClient(previousKms);
+ clientInstances[previousIdentifier] = previousInstance;
+}
+
+/**
+ * Extract backend provider from key, validate arn for errors.
+ * @param {string} key KeyId or KeyArn
+ * @param {object} log logger
+ * @returns {object} error or client with extracted KeyId
+ */
+function getClientForKey(key, log) {
+ // if extraction only return the id, it is not a scality arnPrefix
+ const detail = extractDetailFromArn(key);
+ let clientIdentifier;
+ if (detail.type) {
+ // if type was extracted, it is a scality arnPrefix, it needs validation
+ // might throw if arn malformed or backend not available
+ // for any request (PUT or GET)
+ const error = validateKeyDetail(detail, availableBackends);
+ if (error) {
+ log.error('KMS key arn is invalid', { key, detail, availableBackends });
+ return { error };
+ }
+ clientIdentifier = `${detail.type}:${detail.protocol}:${detail.provider}`;
+ } else if (config.sseMigration) {
+ // if not a scality arnPrefix but migration from previous KMS
+ clientIdentifier = previousIdentifier;
+ } else {
+ // if not a scality arnPrefix and no migration
+ clientIdentifier = currentIdentifier;
+ }
+
+ const instance = clientInstances[clientIdentifier];
+
+ if (instance) {
+ // was already instantiated
+ // return the extracted key id to avoid further processing of potential arn
+ // return clientIdentifier to allow usage restriction
+ return { ...instance, clientIdentifier, key: detail.id };
+ }
+
+ // Only pre instantiated previous KMS from sseMigration is supported now
+ // Here we could instantiate other provider on the fly to manage multi providers
+ log.error('KMS key doesn\'t match any KMS instance', { key, detail, availableBackends });
+    // errors.InvalidArgument is already an error instance, not a constructor
+    return { error: errors.InvalidArgument
+        .customizeDescription(`KMS unknown provider for key ${key}`),
+ };
}
class KMS {
+ /** Used for keys from current client */
+ static get arnPrefix() {
+ return client.backend.arnPrefix;
+ }
+
/**
* Create a new bucket encryption key.
*
@@ -65,9 +172,10 @@ class KMS {
* @param {object} log - logger object
* @param {function} cb - callback
* @returns {undefined}
- * @callback called with (err, { masterKeyId: string, isAccountEncryptionEnabled: boolean })
+ * @callback called with (err, { masterKeyId: string, masterKeyArn: string, isAccountEncryptionEnabled: boolean })
*/
static createBucketKey(bucket, log, cb) {
+ // always use current client for create
log.debug('creating a new bucket key');
// Check if the client supports the use of a default master encryption key per account
// and one is configured.
@@ -84,17 +192,22 @@ class KMS {
const { encryptionKeyId, action } = data;
log.trace('default encryption key retrieved or created at the account level from vault',
{ implName, encryptionKeyId, action });
- return cb(null, { masterKeyId: encryptionKeyId, isAccountEncryptionEnabled: true });
+ return cb(null, {
+ // vault only return arn
+ masterKeyId: encryptionKeyId,
+ masterKeyArn: encryptionKeyId,
+ isAccountEncryptionEnabled: true,
+ });
});
}
// Otherwise, create a default master encryption key, later its id will be stored at the bucket metadata level.
- return client.createBucketKey(bucket.getName(), log, (err, masterKeyId) => {
+ return client.createBucketKey(bucket.getName(), log, (err, masterKeyId, masterKeyArn) => {
if (err) {
log.debug('error from kms', { implName, error: err });
return cb(err);
}
log.trace('bucket key created in kms');
- return cb(null, { masterKeyId });
+ return cb(null, { masterKeyId, masterKeyArn });
});
}
@@ -125,7 +238,19 @@ class KMS {
};
if (algorithm === 'aws:kms' && configuredMasterKeyId) {
- serverSideEncryptionInfo.configuredMasterKeyId = configuredMasterKeyId;
+ // If input key is scality arn format it needs validation
+ // otherwise prepend the current KMS client arnPrefix
+ if (isScalityKmsArn(configuredMasterKeyId)) {
+ const detail = extractDetailFromArn(configuredMasterKeyId);
+ const error = validateKeyDetail(detail, availableBackends);
+ if (error) {
+ return cb(error);
+ }
+ serverSideEncryptionInfo.configuredMasterKeyId = configuredMasterKeyId;
+ } else {
+ serverSideEncryptionInfo.configuredMasterKeyId =
+ `${client.backend.arnPrefix}${configuredMasterKeyId}`;
+ }
return process.nextTick(() => cb(null, serverSideEncryptionInfo));
}
@@ -135,8 +260,8 @@ class KMS {
return cb(err);
}
- const { masterKeyId, isAccountEncryptionEnabled } = data;
- serverSideEncryptionInfo.masterKeyId = masterKeyId;
+ const { masterKeyId, masterKeyArn, isAccountEncryptionEnabled } = data;
+ serverSideEncryptionInfo.masterKeyId = masterKeyArn || masterKeyId;
if (isAccountEncryptionEnabled) {
serverSideEncryptionInfo.isAccountEncryptionEnabled = isAccountEncryptionEnabled;
@@ -161,7 +286,12 @@ class KMS {
*/
static destroyBucketKey(bucketKeyId, log, cb) {
log.debug('deleting bucket key', { bucketKeyId });
- client.destroyBucketKey(bucketKeyId, log, err => {
+ // shadowing global client for key
+ const { error, client, implName, key } = getClientForKey(bucketKeyId, log);
+ if (error) {
+ return cb(error);
+ }
+ return client.destroyBucketKey(key, log, err => {
if (err) {
log.debug('error from kms', { implName, error: err });
return cb(err);
@@ -184,11 +314,13 @@ class KMS {
* true for mandatory encryption
* @param {object} log - logger object
* @param {function} cb - cb from external call
+ * @param {object} [opts] - additional options
+ * @param {boolean} [opts.previousOk] - allow usage of previous KMS (for ongoing MPU not migrated)
* @returns {undefined}
* @callback called with (err, cipherBundle)
*/
static createCipherBundle(serverSideEncryptionInfo,
- log, cb) {
+ log, cb, opts) {
const { algorithm, configuredMasterKeyId, masterKeyId: bucketMasterKeyId } = serverSideEncryptionInfo;
let masterKeyId = bucketMasterKeyId;
@@ -196,16 +328,32 @@ class KMS {
log.debug('using user configured kms master key id');
masterKeyId = configuredMasterKeyId;
}
+ // shadowing global client for key
+ // but should not happen to cipher for another client as Puts should use current KMS
+ // still extract KeyId and validate arn
+ const { error, client, implName, clientIdentifier, key } = getClientForKey(masterKeyId, log);
+ if (error) {
+ return cb(error);
+ }
+ if (previousIdentifier
+ && clientIdentifier === previousIdentifier
+ && clientIdentifier !== currentIdentifier
+            && !(opts && opts.previousOk)
+ ) {
+ return cb(errors.InvalidArgument
+ .customizeDescription(
+ 'KMS cannot use previous provider to encrypt new objects if a new provider is configured'));
+ }
const cipherBundle = {
algorithm,
- masterKeyId,
+ masterKeyId, // keep arnPrefix in cipherBundle as it is returned to callback
cryptoScheme: 1,
cipheredDataKey: null,
cipher: null,
};
- async.waterfall([
+ return async.waterfall([
function generateDataKey(next) {
/* There are 2 ways of generating a datakey :
- using the generateDataKey of the KMS backend if it exists
@@ -220,7 +368,7 @@ class KMS {
if (client.generateDataKey) {
log.debug('creating a data key using the KMS');
res = client.generateDataKey(cipherBundle.cryptoScheme,
- cipherBundle.masterKeyId,
+ key,
log, (err, plainTextDataKey, cipheredDataKey) => {
if (err) {
log.debug('error generating a new data key from KMS',
@@ -236,7 +384,7 @@ class KMS {
log.debug('ciphering the data key');
res = client.cipherDataKey(cipherBundle.cryptoScheme,
- cipherBundle.masterKeyId,
+ key,
plainTextDataKey, log, (err, cipheredDataKey) => {
if (err) {
log.debug('error encrypting the data key using KMS',
@@ -309,17 +457,25 @@ class KMS {
cryptoScheme: serverSideEncryptionInfo.cryptoScheme,
decipher: null,
};
+
+ // shadowing global client for key - implName already used can't be shadowed here
+ const { error, client, implName: _impl, key } = getClientForKey(
+ serverSideEncryptionInfo.masterKeyId, log);
+ if (error) {
+ return cb(error);
+ }
+
return async.waterfall([
function decipherDataKey(next) {
return client.decipherDataKey(
decipherBundle.cryptoScheme,
- serverSideEncryptionInfo.masterKeyId,
+ key,
serverSideEncryptionInfo.cipheredDataKey,
log, (err, plainTextDataKey) => {
log.debug('deciphering a data key');
if (err) {
log.debug('error from kms',
- { implName, error: err });
+ { implName: _impl, error: err });
return next(err);
}
log.trace('data key deciphered by the kms');
@@ -333,7 +489,7 @@ class KMS {
plainTextDataKey.fill(0);
if (err) {
log.debug('error from kms',
- { implName, error: err });
+ { implName: _impl, error: err });
return next(err);
}
log.trace('decipher created by the kms');
@@ -347,7 +503,7 @@ class KMS {
], (err, decipherBundle) => {
if (err) {
log.error('error processing decipher bundle',
- { implName, error: err });
+ { implName: _impl, error: err });
return cb(err);
}
return cb(err, decipherBundle);
diff --git a/lib/utilities/collectResponseHeaders.js b/lib/utilities/collectResponseHeaders.js
index d61b368b42..2d9c25a877 100644
--- a/lib/utilities/collectResponseHeaders.js
+++ b/lib/utilities/collectResponseHeaders.js
@@ -1,7 +1,8 @@
const { getVersionIdResHeader } = require('../api/apiUtils/object/versioning');
const checkUserMetadataSize
= require('../api/apiUtils/object/checkUserMetadataSize');
-
+const { config } = require('../Config');
+const { getKeyIdFromArn } = require('arsenal/build/lib/network/KMSInterface');
/**
* Pulls data from saved object metadata to send in response
* @param {object} objectMD - object's metadata
@@ -40,10 +41,11 @@ function collectResponseHeaders(objectMD, corsHeaders, versioningCfg,
responseMetaHeaders['x-amz-server-side-encryption']
= objectMD['x-amz-server-side-encryption'];
}
- if (objectMD['x-amz-server-side-encryption-aws-kms-key-id'] &&
+ const kmsKey = objectMD['x-amz-server-side-encryption-aws-kms-key-id'];
+ if (kmsKey &&
objectMD['x-amz-server-side-encryption'] === 'aws:kms') {
responseMetaHeaders['x-amz-server-side-encryption-aws-kms-key-id']
- = objectMD['x-amz-server-side-encryption-aws-kms-key-id'];
+ = config.kmsHideScalityArn ? getKeyIdFromArn(kmsKey) : kmsKey;
}
responseMetaHeaders['Accept-Ranges'] = 'bytes';
diff --git a/package.json b/package.json
index 8ee1b27259..0e59ec30b9 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "s3",
- "version": "7.70.21-11",
+ "version": "7.70.21-12",
"description": "S3 connector",
"main": "index.js",
"engines": {
@@ -20,7 +20,7 @@
"homepage": "https://github.com/scality/S3#readme",
"dependencies": {
"@hapi/joi": "^17.1.0",
- "arsenal": "git+https://github.com/scality/arsenal#7.70.4-2",
+ "arsenal": "git+https://github.com/scality/arsenal#7.70.4-5-outscale-cldsrv",
"async": "~2.5.0",
"aws-sdk": "2.905.0",
"azure-storage": "^2.1.0",
diff --git a/tests/functional/aws-node-sdk/test/object/encryptionHeaders.js b/tests/functional/aws-node-sdk/test/object/encryptionHeaders.js
index c9c4c41a14..0f5cd65342 100644
--- a/tests/functional/aws-node-sdk/test/object/encryptionHeaders.js
+++ b/tests/functional/aws-node-sdk/test/object/encryptionHeaders.js
@@ -6,6 +6,11 @@ const withV4 = require('../support/withV4');
const BucketUtility = require('../../lib/utility/bucket-util');
const kms = require('../../../../../lib/kms/wrapper');
const { DummyRequestLogger } = require('../../../../unit/helpers');
+const { config } = require('../../../../../lib/Config');
+const { getKeyIdFromArn } = require('arsenal/build/lib/network/KMSInterface');
+
+// For this test env S3_CONFIG_FILE should be the same as running cloudserver
+// to have the same config.kmsHideScalityArn value
const log = new DummyRequestLogger();
@@ -55,7 +60,9 @@ function createExpected(sseConfig, kmsKeyId) {
}
if (sseConfig.masterKeyId) {
- expected.masterKeyId = kmsKeyId;
+ expected.masterKeyId = config.kmsHideScalityArn
+ ? getKeyIdFromArn(kmsKeyId)
+ : kmsKeyId;
}
return expected;
}
@@ -91,7 +98,7 @@ describe('per object encryption headers', () => {
const bucket = new BucketInfo('enc-bucket-test', 'OwnerId',
'OwnerDisplayName', new Date().toJSON());
kms.createBucketKey(bucket, log,
- (err, { masterKeyId: keyId }) => {
+ (err, { masterKeyArn: keyId }) => {
assert.ifError(err);
kmsKeyId = keyId;
done();
@@ -221,7 +228,9 @@ describe('per object encryption headers', () => {
done => {
const _existing = Object.assign({}, existing);
if (existing.masterKeyId) {
- _existing.masterKeyId = kmsKeyId;
+ _existing.masterKeyId = config.kmsHideScalityArn
+ ? getKeyIdFromArn(kmsKeyId)
+ : kmsKeyId;
}
const params = {
Bucket: bucket2,
diff --git a/tests/unit/api/bucketDeleteEncryption.js b/tests/unit/api/bucketDeleteEncryption.js
index fd307fdfb6..8f094571af 100644
--- a/tests/unit/api/bucketDeleteEncryption.js
+++ b/tests/unit/api/bucketDeleteEncryption.js
@@ -17,6 +17,8 @@ const bucketPutRequest = {
url: '/',
};
+const arnPrefix = inMemory.backend.arnPrefix;
+
describe('bucketDeleteEncryption API', () => {
before(() => cleanup());
@@ -129,7 +131,7 @@ describe('bucketDeleteEncryption API', () => {
assert.strictEqual(sseInfo.mandatory, true);
assert.strictEqual(sseInfo.algorithm, 'aws:kms');
assert(!sseInfo.masterKeyId);
- assert.strictEqual(sseInfo.configuredMasterKeyId, keyId2);
+ assert.strictEqual(sseInfo.configuredMasterKeyId, `${arnPrefix}${keyId2}`);
done();
});
});
@@ -155,7 +157,7 @@ describe('bucketDeleteEncryption API', () => {
assert.strictEqual(sseInfo.mandatory, true);
assert.strictEqual(sseInfo.algorithm, 'aws:kms');
assert.strictEqual(sseInfo.masterKeyId, expectedMasterKeyId);
- assert.strictEqual(sseInfo.configuredMasterKeyId, keyId);
+ assert.strictEqual(sseInfo.configuredMasterKeyId, `${arnPrefix}${keyId}`);
done();
});
});
diff --git a/tests/unit/api/bucketPut.js b/tests/unit/api/bucketPut.js
index f64ae560f6..b4105e887e 100644
--- a/tests/unit/api/bucketPut.js
+++ b/tests/unit/api/bucketPut.js
@@ -27,6 +27,7 @@ const testRequest = {
post: '',
headers: { host: `${bucketName}.s3.amazonaws.com` },
};
+const arnPrefix = inMemory.backend.arnPrefix;
const testChecks = [
{
@@ -489,6 +490,7 @@ describe('bucketPut API with bucket-level encryption', () => {
assert.strictEqual(serverSideEncryption.algorithm, 'AES256');
assert.strictEqual(serverSideEncryption.mandatory, true);
assert(serverSideEncryption.masterKeyId);
+ assert.match(serverSideEncryption.masterKeyId, new RegExp(arnPrefix));
assert(!serverSideEncryption.isAccountEncryptionEnabled);
done();
});
@@ -512,6 +514,7 @@ describe('bucketPut API with bucket-level encryption', () => {
assert.strictEqual(serverSideEncryption.algorithm, 'aws:kms');
assert.strictEqual(serverSideEncryption.mandatory, true);
assert(serverSideEncryption.masterKeyId);
+ assert.match(serverSideEncryption.masterKeyId, new RegExp(arnPrefix));
assert(!serverSideEncryption.isAccountEncryptionEnabled);
done();
});
@@ -537,7 +540,7 @@ describe('bucketPut API with bucket-level encryption', () => {
cryptoScheme: 1,
algorithm: 'aws:kms',
mandatory: true,
- configuredMasterKeyId: keyId,
+ configuredMasterKeyId: `${arnPrefix}${keyId}`,
});
done();
});
@@ -595,7 +598,7 @@ describe('bucketPut API with account level encryption', () => {
cryptoScheme: 1,
algorithm: 'AES256',
mandatory: true,
- masterKeyId: accountLevelMasterKeyId,
+ masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`,
isAccountEncryptionEnabled: true,
});
done();
@@ -620,7 +623,7 @@ describe('bucketPut API with account level encryption', () => {
cryptoScheme: 1,
algorithm: 'aws:kms',
mandatory: true,
- masterKeyId: accountLevelMasterKeyId,
+ masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`,
isAccountEncryptionEnabled: true,
});
done();
@@ -647,7 +650,7 @@ describe('bucketPut API with account level encryption', () => {
cryptoScheme: 1,
algorithm: 'aws:kms',
mandatory: true,
- configuredMasterKeyId: keyId,
+ configuredMasterKeyId: `${arnPrefix}${keyId}`,
});
done();
});
@@ -753,7 +756,7 @@ describe('bucketPut API with SSE Configurations', () => {
const sse = md.getServerSideEncryption();
assert.strictEqual(sse.algorithm, 'aws:kms');
assert.strictEqual(sse.mandatory, true);
- assert.strictEqual(sse.configuredMasterKeyId, 'test-kms-key-id');
+ assert.strictEqual(sse.configuredMasterKeyId, `${arnPrefix}test-kms-key-id`);
done();
});
});
@@ -824,7 +827,7 @@ describe('bucketPut API with SSE Configurations', () => {
const sse = md.getServerSideEncryption();
assert.strictEqual(sse.algorithm, 'aws:kms');
assert.strictEqual(sse.mandatory, true);
- assert.strictEqual(sse.configuredMasterKeyId, 'another-kms-key-id');
+ assert.strictEqual(sse.configuredMasterKeyId, `${arnPrefix}another-kms-key-id`);
done();
});
});
diff --git a/tests/unit/api/bucketPutEncryption.js b/tests/unit/api/bucketPutEncryption.js
index 148ffd3f09..be9001dad0 100644
--- a/tests/unit/api/bucketPutEncryption.js
+++ b/tests/unit/api/bucketPutEncryption.js
@@ -19,6 +19,7 @@ const bucketPutRequest = {
headers: { host: `${bucketName}.s3.amazonaws.com` },
url: '/',
};
+const arnPrefix = inMemory.backend.arnPrefix;
describe('bucketPutEncryption API', () => {
let createBucketKeySpy;
@@ -137,7 +138,7 @@ describe('bucketPutEncryption API', () => {
mandatory: true,
algorithm: 'aws:kms',
cryptoScheme: 1,
- configuredMasterKeyId: '12345',
+ configuredMasterKeyId: `${arnPrefix}12345`,
});
done();
});
@@ -212,7 +213,7 @@ describe('bucketPutEncryption API', () => {
algorithm: 'aws:kms',
cryptoScheme: 1,
masterKeyId,
- configuredMasterKeyId: '12345',
+ configuredMasterKeyId: `${arnPrefix}12345`,
});
done();
});
@@ -259,6 +260,7 @@ describe('bucketPutEncryption API', () => {
assert.strictEqual(updatedSSEInfo.algorithm, 'AES256');
assert.strictEqual(updatedSSEInfo.cryptoScheme, 1);
assert(updatedSSEInfo.masterKeyId);
+                assert.match(updatedSSEInfo.masterKeyId, new RegExp(arnPrefix));
done();
});
});
@@ -314,7 +316,7 @@ describe('bucketPutEncryption API with account level encryption', () => {
cryptoScheme: 1,
algorithm: 'AES256',
mandatory: true,
- masterKeyId: accountLevelMasterKeyId,
+ masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`,
isAccountEncryptionEnabled: true,
});
done();
@@ -333,7 +335,7 @@ describe('bucketPutEncryption API with account level encryption', () => {
cryptoScheme: 1,
algorithm: 'aws:kms',
mandatory: true,
- masterKeyId: accountLevelMasterKeyId,
+ masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`,
isAccountEncryptionEnabled: true,
});
done();
@@ -353,7 +355,7 @@ describe('bucketPutEncryption API with account level encryption', () => {
cryptoScheme: 1,
algorithm: 'aws:kms',
mandatory: true,
- configuredMasterKeyId: keyId,
+ configuredMasterKeyId: `${arnPrefix}${keyId}`,
});
done();
});
@@ -366,7 +368,7 @@ describe('bucketPutEncryption API with account level encryption', () => {
cryptoScheme: 1,
algorithm: 'AES256',
mandatory: true,
- masterKeyId: accountLevelMasterKeyId,
+ masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`,
isAccountEncryptionEnabled: true,
};
@@ -399,7 +401,7 @@ describe('bucketPutEncryption API with account level encryption', () => {
cryptoScheme: 1,
algorithm: 'AES256',
mandatory: true,
- masterKeyId: accountLevelMasterKeyId,
+ masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`,
isAccountEncryptionEnabled: true,
});
const keyId = '12345';
@@ -412,8 +414,8 @@ describe('bucketPutEncryption API with account level encryption', () => {
cryptoScheme: 1,
algorithm: 'aws:kms',
mandatory: true,
- masterKeyId: accountLevelMasterKeyId,
- configuredMasterKeyId: keyId,
+ masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`,
+ configuredMasterKeyId: `${arnPrefix}${keyId}`,
isAccountEncryptionEnabled: true,
});
done();
@@ -434,7 +436,7 @@ describe('bucketPutEncryption API with account level encryption', () => {
cryptoScheme: 1,
algorithm: 'aws:kms',
mandatory: true,
- masterKeyId: accountLevelMasterKeyId,
+ masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`,
isAccountEncryptionEnabled: true,
});
const newConf = templateSSEConfig({ algorithm: 'AES256' });
@@ -446,7 +448,7 @@ describe('bucketPutEncryption API with account level encryption', () => {
cryptoScheme: 1,
algorithm: 'AES256',
mandatory: true,
- masterKeyId: accountLevelMasterKeyId,
+ masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`,
isAccountEncryptionEnabled: true,
});
done();
@@ -468,7 +470,7 @@ describe('bucketPutEncryption API with account level encryption', () => {
cryptoScheme: 1,
algorithm: 'aws:kms',
mandatory: true,
- configuredMasterKeyId: keyId,
+ configuredMasterKeyId: `${arnPrefix}${keyId}`,
});
const newConf = templateSSEConfig({ algorithm: 'AES256' });
return bucketPutEncryption(authInfo, templateRequest(bucketName, { post: newConf }), log,
@@ -479,7 +481,7 @@ describe('bucketPutEncryption API with account level encryption', () => {
cryptoScheme: 1,
algorithm: 'AES256',
mandatory: true,
- masterKeyId: accountLevelMasterKeyId,
+ masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`,
isAccountEncryptionEnabled: true,
});
done();
diff --git a/tests/unit/encryption/kms.js b/tests/unit/encryption/kms.js
index 7abd22c191..ff94a77d8a 100644
--- a/tests/unit/encryption/kms.js
+++ b/tests/unit/encryption/kms.js
@@ -48,7 +48,7 @@ describe('KMS unit tests', () => {
assert.strictEqual(sseInfo.cryptoScheme, 1);
assert.strictEqual(sseInfo.mandatory, true);
assert.strictEqual(sseInfo.algorithm, 'aws:kms');
- assert.strictEqual(sseInfo.configuredMasterKeyId, masterKeyId);
+ assert.strictEqual(sseInfo.configuredMasterKeyId, `${KMS.arnPrefix}${masterKeyId}`);
done();
});
});
diff --git a/tests/unit/testConfigs/parseKmsAWS.js b/tests/unit/testConfigs/parseKmsAWS.js
index d4a327413d..bd0bc32c31 100644
--- a/tests/unit/testConfigs/parseKmsAWS.js
+++ b/tests/unit/testConfigs/parseKmsAWS.js
@@ -19,23 +19,24 @@ describe('parseKmsAWS Function', () => {
});
it('should throw an error if endpoint is not defined in kmsAWS', () => {
- const config = { kmsAWS: { ak: 'ak', sk: 'sk' } };
+ const config = { kmsAWS: { providerName: 'tests', ak: 'ak', sk: 'sk' } };
assert.throws(() => configInstance._parseKmsAWS(config), 'endpoint must be defined');
});
it('should throw an error if ak is not defined in kmsAWS', () => {
- const config = { kmsAWS: { endpoint: 'https://example.com', sk: 'sk' } };
+ const config = { kmsAWS: { providerName: 'tests', endpoint: 'https://example.com', sk: 'sk' } };
assert.throws(() => configInstance._parseKmsAWS(config), 'ak must be defined');
});
it('should throw an error if sk is not defined in kmsAWS', () => {
- const config = { kmsAWS: { endpoint: 'https://example.com', ak: 'ak' } };
+ const config = { kmsAWS: { providerName: 'tests', endpoint: 'https://example.com', ak: 'ak' } };
assert.throws(() => configInstance._parseKmsAWS(config), 'sk must be defined');
});
it('should return the expected kmsAWS object when valid config is provided', () => {
const config = {
kmsAWS: {
+ providerName: 'tests',
endpoint: 'https://example.com',
ak: 'accessKey',
sk: 'secretKey',
@@ -43,6 +44,7 @@ describe('parseKmsAWS Function', () => {
};
const result = configInstance._parseKmsAWS(config);
assert.deepStrictEqual(result, {
+ providerName: 'tests',
endpoint: 'https://example.com',
ak: 'accessKey',
sk: 'secretKey',
@@ -52,6 +54,7 @@ describe('parseKmsAWS Function', () => {
it('should include region if provided in the config', () => {
const config = {
kmsAWS: {
+ providerName: 'tests',
endpoint: 'https://example.com',
ak: 'accessKey',
sk: 'secretKey',
@@ -60,6 +63,7 @@ describe('parseKmsAWS Function', () => {
};
const result = configInstance._parseKmsAWS(config);
assert.deepStrictEqual(result, {
+ providerName: 'tests',
endpoint: 'https://example.com',
ak: 'accessKey',
sk: 'secretKey',
@@ -70,6 +74,7 @@ describe('parseKmsAWS Function', () => {
it('should include tls configuration if provided', () => {
const config = {
kmsAWS: {
+ providerName: 'tests',
endpoint: 'https://example.com',
ak: 'accessKey',
sk: 'secretKey',
@@ -82,6 +87,7 @@ describe('parseKmsAWS Function', () => {
};
const result = configInstance._parseKmsAWS(config);
assert.deepStrictEqual(result, {
+ providerName: 'tests',
endpoint: 'https://example.com',
ak: 'accessKey',
sk: 'secretKey',
@@ -112,6 +118,7 @@ describe('parseKmsAWS TLS section', () => {
it('should throw an error if tls.rejectUnauthorized is not a boolean', () => {
const config = {
kmsAWS: {
+ providerName: 'tests',
endpoint: 'https://example.com',
ak: 'accessKey',
sk: 'secretKey',
@@ -127,6 +134,7 @@ describe('parseKmsAWS TLS section', () => {
it('should throw an error if tls.minVersion is not a string', () => {
const config = {
kmsAWS: {
+ providerName: 'tests',
endpoint: 'https://example.com',
ak: 'accessKey',
sk: 'secretKey',
@@ -144,6 +152,7 @@ describe('parseKmsAWS TLS section', () => {
it('should throw an error if tls.maxVersion is not a string', () => {
const config = {
kmsAWS: {
+ providerName: 'tests',
endpoint: 'https://example.com',
ak: 'accessKey',
sk: 'secretKey',
@@ -161,6 +170,7 @@ describe('parseKmsAWS TLS section', () => {
it('should throw an error if tls.ca is not a string or an array', () => {
const config = {
kmsAWS: {
+ providerName: 'tests',
endpoint: 'https://example.com',
ak: 'accessKey',
sk: 'secretKey',
@@ -178,6 +188,7 @@ describe('parseKmsAWS TLS section', () => {
it('should return an empty tls object if all tls fields are undefined', () => {
const config = {
kmsAWS: {
+ providerName: 'tests',
endpoint: 'https://example.com',
ak: 'accessKey',
sk: 'secretKey',
@@ -192,6 +203,7 @@ describe('parseKmsAWS TLS section', () => {
it('should load tls.ca as an array of files', () => {
const config = {
kmsAWS: {
+ providerName: 'tests',
endpoint: 'http://example.com',
ak: 'accessKey',
sk: 'secretKey',
@@ -212,6 +224,7 @@ describe('parseKmsAWS TLS section', () => {
it('should load tls.cert as a single file', () => {
const config = {
kmsAWS: {
+ providerName: 'tests',
endpoint: 'http://example.com',
ak: 'accessKey',
sk: 'secretKey',
@@ -231,6 +244,7 @@ describe('parseKmsAWS TLS section', () => {
it('should load tls.key as a single file', () => {
const config = {
kmsAWS: {
+ providerName: 'tests',
endpoint: 'http://example.com',
ak: 'accessKey',
sk: 'secretKey',
@@ -250,6 +264,7 @@ describe('parseKmsAWS TLS section', () => {
it('should not load TLS files if tls is undefined', () => {
const config = {
kmsAWS: {
+ providerName: 'tests',
endpoint: 'http://example.com',
ak: 'accessKey',
sk: 'secretKey',
@@ -267,6 +282,7 @@ describe('parseKmsAWS TLS section', () => {
const basePath = configInstance._basePath;
const config = {
kmsAWS: {
+ providerName: 'tests',
endpoint: 'http://example.com',
ak: 'accessKey',
sk: 'secretKey',
@@ -292,6 +308,7 @@ describe('parseKmsAWS TLS section', () => {
const config = {
kmsAWS: {
+ providerName: 'tests',
endpoint: 'http://example.com',
ak: 'accessKey',
sk: 'secretKey',
diff --git a/tests/unit/utils/multipleBackendGateway.js b/tests/unit/utils/multipleBackendGateway.js
index c9d783b676..c62b89bff6 100644
--- a/tests/unit/utils/multipleBackendGateway.js
+++ b/tests/unit/utils/multipleBackendGateway.js
@@ -1,5 +1,6 @@
const assert = require('assert');
const { checkExternalBackend } = require('arsenal').storage.data.external.backendUtils;
+const sinon = require('sinon');
const awsLocations = [
'awsbackend',
];
@@ -29,10 +30,14 @@ function getClients(isSuccess) {
describe('Testing _checkExternalBackend', function describeF() {
this.timeout(50000);
beforeEach(done => {
+ this.clock = sinon.useFakeTimers({ shouldAdvanceTime: true });
const clients = getClients(true);
return checkExternalBackend(clients, awsLocations, 'aws_s3', false,
externalBackendHealthCheckInterval, done);
});
+ afterEach(() => {
+ this.clock.restore();
+ });
it('should not refresh response before externalBackendHealthCheckInterval',
done => {
const clients = getClients(false);
@@ -59,5 +64,6 @@ describe('Testing _checkExternalBackend', function describeF() {
return done();
});
}, externalBackendHealthCheckInterval + 1);
+ this.clock.next(); // test faster
});
});
diff --git a/yarn.lock b/yarn.lock
index 4210826a96..5f7187dc6b 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -426,9 +426,9 @@ arraybuffer.slice@~0.0.7:
optionalDependencies:
ioctl "^2.0.2"
-"arsenal@git+https://github.com/scality/arsenal#7.70.4-2":
- version "7.70.4-2"
- resolved "git+https://github.com/scality/arsenal#3017b78bfb954b2ce0803c8b373bc247e3113a6c"
+"arsenal@git+https://github.com/scality/arsenal#7.70.4-5-outscale-cldsrv":
+ version "7.70.4-5-outscale-cldsrv"
+ resolved "git+https://github.com/scality/arsenal#bc1d766ad04a9ea6e205ba8c3531293b49a44015"
dependencies:
"@types/async" "^3.2.12"
"@types/utf8" "^3.0.1"