diff --git a/config.json b/config.json index 565c191226..3d3d9dd9d3 100644 --- a/config.json +++ b/config.json @@ -88,7 +88,9 @@ } ], "defaultEncryptionKeyPerAccount": true, + "kmsHideScalityArn": false, "kmsAWS": { + "providerName": "aws", "region": "us-east-1", "endpoint": "http://127.0.0.1:8080", "ak": "tbd", diff --git a/lib/Config.js b/lib/Config.js index cbaf885824..280310613d 100644 --- a/lib/Config.js +++ b/lib/Config.js @@ -18,6 +18,13 @@ const { azureAccountNameRegex, base64Regex, const { utapiVersion } = require('utapi'); const { versioning } = require('arsenal'); const constants = require('../constants'); +const { + KmsType, + KmsProtocol, + isValidProvider, + isValidType, + isValidProtocol, +} = require('arsenal/build/lib/network/KMSInterface'); const versionIdUtils = versioning.VersionID; @@ -449,8 +456,9 @@ class Config extends EventEmitter { // Read config automatically this._getLocationConfig(); - this._getConfig(); + const config = this._getConfig(); this._configureBackends(); + this._sseMigration(config); } _parseKmsAWS(config) { @@ -459,13 +467,19 @@ } let kmsAWS = {}; - const { region, endpoint, ak, sk, tls } = config.kmsAWS; + const { providerName, region, endpoint, ak, sk, tls, noAwsArn } = config.kmsAWS; + assert(providerName, 'Configuration Error: providerName must be defined in kmsAWS'); + assert(isValidProvider(providerName), + 'Configuration Error: kmsAWS.providerName must be lowercase alphanumeric only'); assert(endpoint, 'Configuration Error: endpoint must be defined in kmsAWS'); assert(ak, 'Configuration Error: ak must be defined in kmsAWS'); assert(sk, 'Configuration Error: sk must be defined in kmsAWS'); + assert(['undefined', 'boolean'].some(type => type === typeof noAwsArn), + 'Configuration Error: kmsAWS.noAwsArn must be a boolean or not set'); kmsAWS = { + providerName, endpoint, ak, sk, @@ -475,6 +489,10 @@ kmsAWS.region = region; } + if (noAwsArn) { + 
kmsAWS.noAwsArn = noAwsArn; + } + if (tls) { kmsAWS.tls = {}; if (tls.rejectUnauthorized !== undefined) { @@ -589,6 +607,10 @@ class Config extends EventEmitter { transport: this._parseKmipTransport({}), }; if (config.kmip) { + assert(config.kmip.providerName, 'config.kmip.providerName must be defined'); + assert(isValidProvider(config.kmip.providerName), + 'config.kmip.providerName must be lowercase alphanumeric only'); + this.kmip.providerName = config.kmip.providerName; if (config.kmip.client) { if (config.kmip.client.compoundCreateActivate) { assert(typeof config.kmip.client.compoundCreateActivate === @@ -1145,8 +1167,12 @@ class Config extends EventEmitter { this.kms = {}; if (config.kms) { + assert(config.kms.providerName, 'config.kms.providerName must be provided'); + assert(isValidProvider(config.kms.providerName), + 'config.kms.providerName must be lowercase alphanumeric only'); assert(typeof config.kms.userName === 'string'); assert(typeof config.kms.password === 'string'); + this.kms.providerName = config.kms.providerName; this.kms.userName = config.kms.userName; this.kms.password = config.kms.password; if (config.kms.helperProgram !== undefined) { @@ -1176,6 +1202,11 @@ class Config extends EventEmitter { assert(typeof this.defaultEncryptionKeyPerAccount === 'boolean', 'config.defaultEncryptionKeyPerAccount must be a boolean'); + this.kmsHideScalityArn = Object.hasOwnProperty.call(config, 'kmsHideScalityArn') + ? 
config.kmsHideScalityArn + : true; // By default hide scality arn to keep backward compatibility and simplicity + assert.strictEqual(typeof this.kmsHideScalityArn, 'boolean'); + this.healthChecks = defaultHealthChecks; if (config.healthChecks && config.healthChecks.allowFrom) { assert(config.healthChecks.allowFrom instanceof Array, @@ -1380,6 +1411,7 @@ class Config extends EventEmitter { 'bad config: maxScannedLifecycleListingEntries must be greater than 2'); this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries; } + return config; } _configureBackends() { @@ -1455,6 +1487,61 @@ class Config extends EventEmitter { }; } + _sseMigration(config) { + if (config.sseMigration) { + /** + * For data that was encrypted internally by default and a new external provider is setup. + * This config helps detect the existing encryption key to decrypt with the good provider. + * The key format will be migrated automatically on GET/HEADs to include provider details. + */ + this.sseMigration = {}; + const { previousKeyType, previousKeyProtocol, previousKeyProvider } = config.sseMigration; + if (!previousKeyType) { + assert.fail( + 'NotImplemented: No dynamic KMS key migration. Set sseMigration.previousKeyType'); + } + + // If previousKeyType is provided it's used as static value to migrate the format of the key + // without additional dynamic evaluation if the key provider is unknown. 
+ assert(isValidType(previousKeyType), + 'sseMigration.previousKeyType must be "internal" or "external"'); + this.sseMigration.previousKeyType = previousKeyType; + + let expectedProtocol; + if (previousKeyType === KmsType.internal) { + // For internal key type default protocol is file and provider is scality + this.sseMigration.previousKeyProtocol = previousKeyProtocol || KmsProtocol.file; + this.sseMigration.previousKeyProvider = previousKeyProvider || 'scality'; + expectedProtocol = [KmsProtocol.scality, KmsProtocol.mem, KmsProtocol.file]; + } else if (previousKeyType === KmsType.external) { + // No defaults allowed for external provider + assert(previousKeyProtocol, + 'sseMigration.previousKeyProtocol must be defined for external provider'); + this.sseMigration.previousKeyProtocol = previousKeyProtocol; + assert(previousKeyProvider, + 'sseMigration.previousKeyProvider must be defined for external provider'); + this.sseMigration.previousKeyProvider = previousKeyProvider; + expectedProtocol = [KmsProtocol.kmip, KmsProtocol.aws_kms]; + } + + assert(isValidProtocol(previousKeyType, this.sseMigration.previousKeyProtocol), + `sseMigration.previousKeyProtocol must be one of ${expectedProtocol}`); + assert(isValidProvider(previousKeyProvider), + 'sseMigration.previousKeyProvider must be lowercase alphanumeric only'); + + if (this.sseMigration.previousKeyType === KmsType.external) { + if ([KmsProtocol.file, KmsProtocol.mem].includes(this.backends.kms)) { + assert.fail( + `sseMigration.previousKeyType "external" can't migrate to "internal" KMS provider ${ this.backends.kms}` + ); + } + // We'd have to compare protocol & providerName + assert.fail('sseMigration.previousKeyType "external" is not yet available'); + } + } + setAuthDataAccounts(accounts) { this.authData.accounts = accounts; this.emit('authdata-update'); diff --git a/lib/api/apiUtils/bucket/bucketEncryption.js b/lib/api/apiUtils/bucket/bucketEncryption.js index e44a4d3ecf..8916e96f1b 100644 --- 
a/lib/api/apiUtils/bucket/bucketEncryption.js +++ b/lib/api/apiUtils/bucket/bucketEncryption.js @@ -2,6 +2,7 @@ const { errors, errorInstances } = require('arsenal'); const metadata = require('../../../metadata/wrapper'); const kms = require('../../../kms/wrapper'); const { parseString } = require('xml2js'); +const { isScalityKmsArn } = require('arsenal/build/lib/network/KMSInterface'); /** * ServerSideEncryptionInfo - user configuration for server side encryption @@ -95,6 +96,12 @@ function parseEncryptionXml(xml, log, cb) { } result.configuredMasterKeyId = encConfig.KMSMasterKeyID[0]; + // If key is not in a scality arn format include a scality arn prefix + // of the currently selected KMS client. + // To keep track of KMS type, protocol and provider used + if (!isScalityKmsArn(result.configuredMasterKeyId)) { + result.configuredMasterKeyId = `${kms.arnPrefix}${result.configuredMasterKeyId}`; + } } return cb(null, result); }); @@ -119,7 +126,12 @@ function hydrateEncryptionConfig(algorithm, configuredMasterKeyId, mandatory = n const sseConfig = { algorithm, mandatory }; if (algorithm === 'aws:kms' && configuredMasterKeyId) { - sseConfig.configuredMasterKeyId = configuredMasterKeyId; + // If key is not in a scality arn format include a scality arn prefix + // of the currently selected KMS client. + // To keep track of KMS type, protocol and provider used + sseConfig.configuredMasterKeyId = isScalityKmsArn(configuredMasterKeyId) + ? 
configuredMasterKeyId + : `${kms.arnPrefix}${configuredMasterKeyId}`; } if (mandatory !== null) { diff --git a/lib/api/apiUtils/bucket/updateEncryption.js b/lib/api/apiUtils/bucket/updateEncryption.js new file mode 100644 index 0000000000..5db3152423 --- /dev/null +++ b/lib/api/apiUtils/bucket/updateEncryption.js @@ -0,0 +1,141 @@ +const { getVersionSpecificMetadataOptions } = require('../object/versioning'); +// const getReplicationInfo = require('../object/getReplicationInfo'); +const { config } = require('../../../Config'); +const kms = require('../../../kms/wrapper'); +const metadata = require('../../../metadata/wrapper'); +const { isScalityKmsArn, makeScalityArnPrefix } = require('arsenal/build/lib/network/KMSInterface'); + +// Bucket needs a key from the new KMS, not a simple reformatting +function updateBucketEncryption(bucket, log, cb) { + const sse = bucket.getServerSideEncryption(); + + if (!sse) { + return cb(null, bucket); + } + + const masterKey = sse.masterKeyId; + const configuredKey = sse.configuredMasterKeyId; + + // Note: if migration is from an external to an external, absence of arn is not enough + // a comparison of arn will be necessary but config validation blocks this for now + const updateMaster = masterKey && !isScalityKmsArn(masterKey); + const updateConfigured = configuredKey && !isScalityKmsArn(configuredKey); + + if (!updateMaster && !updateConfigured) { + return cb(null, bucket); + } + log.debug('trying to update bucket encryption', { oldKey: masterKey || configuredKey }); + // this should trigger vault account key update as well + return kms.createBucketKey(bucket, log, (err, newSse) => { + if (err) { + return cb(err, bucket); + } + // if both keys need migration, it is ok to use the same KMS key + // as the configured one should be used and the only way to use the + // masterKeyId is to PutBucketEncryption to AES256 but then nothing + // will break and the same KMS key will continue to be used. 
+ // And the key is managed (created) by Scality, not passed from input. + if (updateMaster) { + sse.masterKeyId = newSse.masterKeyArn; + } + if (updateConfigured) { + sse.configuredMasterKeyId = newSse.masterKeyArn; + } + // KMS account key will not be deleted when bucket is deleted + if (newSse.isAccountEncryptionEnabled) { + sse.isAccountEncryptionEnabled = newSse.isAccountEncryptionEnabled; + } + + log.info('updating bucket encryption', { + oldKey: masterKey || configuredKey, + newKey: newSse.masterKeyArn, + isAccount: newSse.isAccountEncryptionEnabled, + }); + return metadata.updateBucket(bucket.getName(), bucket, log, err => cb(err, bucket)); + }); +} + +// Only reformat the key, don't generate a new one. +// Use opts.skipObjectUpdate to only prepare objMD without sending the update to metadata +// if a metadata.putObjectMD is expected later in call flow. (Downside: update skipped if error) +function updateObjectEncryption(bucket, objMD, objectKey, log, keyArnPrefix, opts, cb) { + if (!objMD) { + return cb(null, bucket, objMD); + } + + const key = objMD['x-amz-server-side-encryption-aws-kms-key-id']; + + if (!key || isScalityKmsArn(key)) { + return cb(null, bucket, objMD); + } + const newKey = `${keyArnPrefix}${key}`; + // eslint-disable-next-line no-param-reassign + objMD['x-amz-server-side-encryption-aws-kms-key-id'] = newKey; + // Doesn't seem to be used but update as well + for (const dataLocator of objMD.location || []) { + if (dataLocator.masterKeyId) { + dataLocator.masterKeyId = `${keyArnPrefix}${dataLocator.masterKeyId}`; + } + } + // eslint-disable-next-line no-param-reassign + objMD.originOp = 's3:ObjectCreated:Copy'; + // Copy should be tested for 9.5 in INTGR-1038 + // to make sure it does not impact backbeat CRR / bucket notif + const params = getVersionSpecificMetadataOptions(objMD, config.nullVersionCompatMode); + + log.info('reformating object encryption key', { oldKey: key, newKey, skipUpdate: opts.skipObjectUpdate }); + if 
(opts.skipObjectUpdate) { + return cb(null, bucket, objMD); + } + return metadata.putObjectMD(bucket.getName(), objectKey, objMD, params, + log, err => cb(err, bucket, objMD)); +} + +/** + * Update encryption of bucket and object if kms provider changed + * + * @param {Error} err - error coming from metadata validate before the action handling + * @param {BucketInfo} bucket - bucket + * @param {Object} [objMD] - object metadata + * @param {string} objectKey - objectKey from request. + * @param {Logger} log - request logger + * @param {Object} opts - options for sseMigration + * @param {boolean} [opts.skipObject] - ignore object update + * @param {boolean} [opts.skipObjectUpdate] - don't update metadata but prepare objMD for later update + * @param {Function} cb - callback (err, bucket, objMD) + * @returns {undefined} + */ +function updateEncryption(err, bucket, objMD, objectKey, log, opts, cb) { + // Error passed here to call the function in between the metadataValidate and its callback + if (err) { + return cb(err); + } + // if objMD missing, still try updateBucketEncryption + if (!config.sseMigration) { + return cb(null, bucket, objMD); + } + + const { previousKeyType, previousKeyProtocol, previousKeyProvider } = config.sseMigration; + // previousKeyType is required and validated in Config.js + // for now it is the only implementation we need. + // See TAD Seamless decryption with internal and external KMS: https://scality.atlassian.net/wiki/x/EgADu + // for other methods of migration without a previousKeyType + + const keyArnPrefix = makeScalityArnPrefix(previousKeyType, previousKeyProtocol, previousKeyProvider); + + return updateBucketEncryption(bucket, log, (err, bucket) => { + // Any error in updating encryption at bucket or object level is returned to client. + // Other possibilities: ignore error, include sse migration notice in error message. 
+ if (err) { + return cb(err, bucket, objMD); + } + if (opts.skipObject) { + return cb(err, bucket, objMD); + } + return updateObjectEncryption(bucket, objMD, objectKey, log, keyArnPrefix, opts, cb); + }); +} + +module.exports = { + updateEncryption, +}; diff --git a/lib/api/apiUtils/object/sseHeaders.js b/lib/api/apiUtils/object/sseHeaders.js new file mode 100644 index 0000000000..8ed85a828e --- /dev/null +++ b/lib/api/apiUtils/object/sseHeaders.js @@ -0,0 +1,18 @@ +const { config } = require('../../../Config'); +const { getKeyIdFromArn } = require('arsenal/build/lib/network/KMSInterface'); + +function setSSEHeaders(headers, algo, kmsKey) { + if (algo) { + // eslint-disable-next-line no-param-reassign + headers['x-amz-server-side-encryption'] = algo; + if (kmsKey && algo === 'aws:kms') { + // eslint-disable-next-line no-param-reassign + headers['x-amz-server-side-encryption-aws-kms-key-id'] = + config.kmsHideScalityArn ? getKeyIdFromArn(kmsKey) : kmsKey; + } + } +} + +module.exports = { + setSSEHeaders, +}; diff --git a/lib/api/bucketGetEncryption.js b/lib/api/bucketGetEncryption.js index 34df016758..db5d31432b 100644 --- a/lib/api/bucketGetEncryption.js +++ b/lib/api/bucketGetEncryption.js @@ -6,6 +6,8 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner'); const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const escapeForXml = s3middleware.escapeForXml; +const { config } = require('../Config'); +const { getKeyIdFromArn } = require('arsenal/build/lib/network/KMSInterface'); /** * Bucket Get Encryption - Get bucket SSE configuration @@ -60,7 +62,11 @@ function bucketGetEncryption(authInfo, request, log, callback) { ]; if (sseInfo.configuredMasterKeyId) { - xml.push(`${escapeForXml(sseInfo.configuredMasterKeyId)}`); + xml.push(`${escapeForXml( + config.kmsHideScalityArn + ? 
getKeyIdFromArn(sseInfo.configuredMasterKeyId) + : sseInfo.configuredMasterKeyId + )}`); } xml.push( diff --git a/lib/api/completeMultipartUpload.js b/lib/api/completeMultipartUpload.js index 0f3ca78469..a31c195c07 100644 --- a/lib/api/completeMultipartUpload.js +++ b/lib/api/completeMultipartUpload.js @@ -22,6 +22,7 @@ const locationConstraintCheck const locationKeysHaveChanged = require('./apiUtils/object/locationKeysHaveChanged'); const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders'); +const { setSSEHeaders } = require('./apiUtils/object/sseHeaders'); const versionIdUtils = versioning.VersionID; @@ -317,14 +318,15 @@ function completeMultipartUpload(authInfo, request, log, callback) { if (storedMetadata.legalHold) { metaStoreParams.legalHold = storedMetadata.legalHold; } - const serverSideEncryption = - destBucket.getServerSideEncryption(); + const serverSideEncryption = storedMetadata['x-amz-server-side-encryption']; let pseudoCipherBundle = null; if (serverSideEncryption) { + const kmsKey = storedMetadata['x-amz-server-side-encryption-aws-kms-key-id']; pseudoCipherBundle = { - algorithm: destBucket.getSseAlgorithm(), - masterKeyId: destBucket.getSseMasterKeyId(), + algorithm: serverSideEncryption, + masterKeyId: kmsKey, }; + setSSEHeaders(responseHeaders, serverSideEncryption, kmsKey); } return versioningPreprocessing(bucketName, destBucket, objectKey, objMD, log, (err, options) => { diff --git a/lib/api/initiateMultipartUpload.js b/lib/api/initiateMultipartUpload.js index 76801625e1..7bfd3cd66a 100644 --- a/lib/api/initiateMultipartUpload.js +++ b/lib/api/initiateMultipartUpload.js @@ -20,6 +20,8 @@ const { validateHeaders, compareObjectLockInformation } = const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption'); const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders'); const { data } = require('../data/wrapper'); +const { setSSEHeaders } = require('./apiUtils/object/sseHeaders'); 
+const { updateEncryption } = require('./apiUtils/bucket/updateEncryption'); /* Sample xml response: @@ -179,6 +181,10 @@ function initiateMultipartUpload(authInfo, request, log, callback) { }, }); + setSSEHeaders(corsHeaders, + mpuMD['x-amz-server-side-encryption'], + mpuMD['x-amz-server-side-encryption-aws-kms-key-id']); + monitoring.promMetrics('PUT', bucketName, '200', 'initiateMultipartUpload'); return callback(null, xml, corsHeaders); @@ -189,9 +195,13 @@ function initiateMultipartUpload(authInfo, request, log, callback) { function _storetheMPObject(destinationBucket, corsHeaders, serverSideEncryption) { let cipherBundle = null; if (serverSideEncryption) { + const { algorithm, configuredMasterKeyId, masterKeyId } = serverSideEncryption; + if (configuredMasterKeyId) { + log.debug('using user configured kms master key id'); + } cipherBundle = { - algorithm: serverSideEncryption.algorithm, - masterKeyId: serverSideEncryption.masterKeyId, + algorithm, + masterKeyId: configuredMasterKeyId || masterKeyId, }; } const backendInfoObj = locationConstraintCheck(request, null, @@ -263,6 +273,8 @@ function initiateMultipartUpload(authInfo, request, log, callback) { async.waterfall([ next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, + (error, destinationBucket, destObjMD) => + updateEncryption(error, destinationBucket, destObjMD, objectKey, log, { skipObject: true }, (error, destinationBucket) => { const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket); @@ -275,7 +287,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) { return next(error, corsHeaders); } return next(null, corsHeaders, destinationBucket); - }), + })), (corsHeaders, destinationBucket, next) => { if (destinationBucket.hasDeletedFlag() && accountCanonicalID !== destinationBucket.getOwner()) { log.trace('deleted flag on bucket and request from non-owner account'); diff --git a/lib/api/objectCopy.js 
b/lib/api/objectCopy.js index c2b26fbfc7..bf3477e411 100644 --- a/lib/api/objectCopy.js +++ b/lib/api/objectCopy.js @@ -22,6 +22,8 @@ const { config } = require('../Config'); const monitoring = require('../utilities/metrics'); const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption'); const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders'); +const { setSSEHeaders } = require('./apiUtils/object/sseHeaders'); +const { updateEncryption } = require('./apiUtils/bucket/updateEncryption'); const versionIdUtils = versioning.VersionID; const locationHeader = constants.objectLocationConstraintHeader; @@ -249,6 +251,8 @@ function objectCopy(authInfo, request, sourceBucket, return async.waterfall([ function checkDestAuth(next) { return standardMetadataValidateBucketAndObj(valPutParams, request.actionImplicitDenies, log, + (err, destBucketMD, destObjMD) => + updateEncryption(err, destBucketMD, destObjMD, destObjectKey, log, { skipObject: true }, (err, destBucketMD, destObjMD) => { if (err) { log.debug('error validating put part of request', @@ -263,7 +267,7 @@ function objectCopy(authInfo, request, sourceBucket, return next(errors.NoSuchBucket); } return next(null, destBucketMD, destObjMD); - }); + })); }, function checkSourceAuthorization(destBucketMD, destObjMD, next) { return standardMetadataValidateBucketAndObj(valGetParams, request.actionImplicitDenies, log, @@ -536,13 +540,10 @@ function objectCopy(authInfo, request, sourceBucket, ].join(''); const additionalHeaders = corsHeaders || {}; if (serverSideEncryption) { - additionalHeaders['x-amz-server-side-encryption'] = - serverSideEncryption.algorithm; - if (serverSideEncryption.algorithm === 'aws:kms') { - additionalHeaders[ - 'x-amz-server-side-encryption-aws-kms-key-id'] = - serverSideEncryption.masterKeyId; - } + setSSEHeaders(additionalHeaders, + serverSideEncryption.algorithm, + serverSideEncryption.configuredMasterKeyId || serverSideEncryption.masterKeyId + ); } if 
(sourceVersionId) { additionalHeaders['x-amz-copy-source-version-id'] = diff --git a/lib/api/objectGet.js b/lib/api/objectGet.js index 7fab417a57..afd68f11dd 100644 --- a/lib/api/objectGet.js +++ b/lib/api/objectGet.js @@ -15,6 +15,7 @@ const monitoring = require('../utilities/metrics'); const { getPartCountFromMd5 } = require('./apiUtils/object/partInfo'); const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders'); const kms = require('../kms/wrapper'); +const { updateEncryption } = require('./apiUtils/bucket/updateEncryption'); const validateHeaders = s3middleware.validateConditionalHeaders; @@ -53,6 +54,7 @@ function objectGet(authInfo, request, returnTagCount, log, callback) { }; return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log, + (err, bucket, objMD) => updateEncryption(err, bucket, objMD, objectKey, log, {}, (err, bucket, objMD) => { const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); @@ -292,7 +294,7 @@ function objectGet(authInfo, request, returnTagCount, log, callback) { }); } ); - }); + })); } module.exports = objectGet; diff --git a/lib/api/objectHead.js b/lib/api/objectHead.js index 7e8d1f6759..eae8a17bab 100644 --- a/lib/api/objectHead.js +++ b/lib/api/objectHead.js @@ -14,6 +14,7 @@ const { getPartNumber, getPartSize, getPartCountFromMd5 } = const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { maximumAllowedPartCount } = require('../../constants'); const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders'); +const { updateEncryption } = require('./apiUtils/bucket/updateEncryption'); /** * HEAD Object - Same as Get Object but only respond with headers @@ -51,6 +52,7 @@ function objectHead(authInfo, request, log, callback) { }; return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log, + (err, bucket, objMD) => updateEncryption(err, bucket, objMD, objectKey, 
log, {}, (err, bucket, objMD) => { const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, bucket); @@ -160,7 +162,7 @@ function objectHead(authInfo, request, log, callback) { }); monitoring.promMetrics('HEAD', bucketName, '200', 'headObject'); return callback(null, responseHeaders); - }); + })); } module.exports = objectHead; diff --git a/lib/api/objectPut.js b/lib/api/objectPut.js index 81174c8ffc..8274ecbd62 100644 --- a/lib/api/objectPut.js +++ b/lib/api/objectPut.js @@ -21,6 +21,7 @@ const writeContinue = require('../utilities/writeContinue'); const { overheadField } = require('../../constants'); const versionIdUtils = versioning.VersionID; +const { updateEncryption } = require('./apiUtils/bucket/updateEncryption'); /** * PUT Object in the requested bucket. Steps include: @@ -81,6 +82,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) { log.trace('owner canonicalID to send to data', { canonicalID }); return standardMetadataValidateBucketAndObj(valParams, request.actionImplicitDenies, log, + (err, bucket, objMD) => updateEncryption(err, bucket, objMD, objectKey, log, { skipObject: true }, (err, bucket, objMD) => { const responseHeaders = collectCorsHeaders(headers.origin, method, bucket); @@ -204,7 +206,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) { null, ingestSize); return callback(null, responseHeaders); }); - }); + })); } module.exports = objectPut; diff --git a/lib/api/objectPutCopyPart.js b/lib/api/objectPutCopyPart.js index 8013e3eca2..289da60d8d 100644 --- a/lib/api/objectPutCopyPart.js +++ b/lib/api/objectPutCopyPart.js @@ -13,6 +13,7 @@ const services = require('../services'); const setUpCopyLocator = require('./apiUtils/object/setUpCopyLocator'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const monitoring = require('../utilities/metrics'); +const { setSSEHeaders } = require('./apiUtils/object/sseHeaders'); const versionIdUtils 
= versioning.VersionID; const { config } = require('../Config'); @@ -237,9 +238,14 @@ function objectPutCopyPart(authInfo, request, sourceBucket, } const destObjLocationConstraint = res.controllingLocationConstraint; + const sseAlgo = res['x-amz-server-side-encryption']; + const sse = sseAlgo ? { + algorithm: sseAlgo, + masterKeyId: res['x-amz-server-side-encryption-aws-kms-key-id'], + } : null; return next(null, dataLocator, destBucketMD, destObjLocationConstraint, copyObjectSize, - sourceVerId, sourceLocationConstraintName, splitter); + sourceVerId, sourceLocationConstraintName, sse, splitter); }); }, function goGetData( @@ -249,6 +255,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket, copyObjectSize, sourceVerId, sourceLocationConstraintName, + sse, splitter, next, ) { @@ -264,6 +271,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket, dataLocator, dataStoreContext, locationConstraintCheck, + sse, (error, eTag, lastModified, serverSideEncryption, locations) => { // eslint-disable-next-line no-param-reassign request.actionImplicitDenies = originalIdentityAuthzResults; @@ -418,12 +426,9 @@ function objectPutCopyPart(authInfo, request, sourceBucket, const additionalHeaders = corsHeaders || {}; if (serverSideEncryption) { - additionalHeaders['x-amz-server-side-encryption'] = - serverSideEncryption.algorithm; - if (serverSideEncryption.algorithm === 'aws:kms') { - additionalHeaders['x-amz-server-side-encryption-aws-kms-key-id'] - = serverSideEncryption.masterKeyId; - } + setSSEHeaders(additionalHeaders, + serverSideEncryption.algorithm, + serverSideEncryption.masterKeyId); } additionalHeaders['x-amz-copy-source-version-id'] = sourceVerId; pushMetric('uploadPartCopy', log, { diff --git a/lib/api/objectPutPart.js b/lib/api/objectPutPart.js index 11e3817d96..7c0403e6b1 100644 --- a/lib/api/objectPutPart.js +++ b/lib/api/objectPutPart.js @@ -17,8 +17,9 @@ const locationConstraintCheck = require('./apiUtils/object/locationConstraintCheck'); const 
monitoring = require('../utilities/metrics'); const writeContinue = require('../utilities/writeContinue'); -const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption'); +const { parseObjectEncryptionHeaders } = require('./apiUtils/bucket/bucketEncryption'); const validateChecksumHeaders = require('./apiUtils/object/validateChecksumHeaders'); +const { setSSEHeaders } = require('./apiUtils/object/sseHeaders'); const skipError = new Error('skip'); @@ -130,29 +131,21 @@ function objectPutPart(authInfo, request, streamingV4Params, log, } return next(null, destinationBucket); }, - // Get bucket server-side encryption, if it exists. - (destinationBucket, next) => getObjectSSEConfiguration( - request.headers, destinationBucket, log, - (err, sseConfig) => next(err, destinationBucket, sseConfig)), - (destinationBucket, encryption, next) => { - // If bucket has server-side encryption, pass the `res` value - if (encryption) { - return kms.createCipherBundle(encryption, log, (err, res) => { - if (err) { - log.error('error processing the cipher bundle for ' + - 'the destination bucket', { - error: err, - }); - return next(err, destinationBucket); - } - return next(null, destinationBucket, res); - }); + // Validate that no object SSE is provided for part. + // Part must use SSE from initiateMPU (overview in metadata) + (destinationBucket, next) => { + const { error, objectSSE } = parseObjectEncryptionHeaders(request.headers); + if (error) { + return next(error, destinationBucket); + } + if (objectSSE.algorithm) { + return next(errors.InvalidArgument.customizeDescription( + 'x-amz-server-side-encryption header is not supported for this operation.')); } - // The bucket does not have server-side encryption, so pass `null` - return next(null, destinationBucket, null); + return next(null, destinationBucket); }, // Get the MPU shadow bucket. 
- (destinationBucket, cipherBundle, next) => + (destinationBucket, next) => metadata.getBucket(mpuBucketName, log, (err, mpuBucket) => { if (err && err.is.NoSuchBucket) { @@ -170,10 +163,10 @@ function objectPutPart(authInfo, request, streamingV4Params, log, if (mpuBucket.getMdBucketModelVersion() < 2) { splitter = constants.oldSplitter; } - return next(null, destinationBucket, cipherBundle, splitter); + return next(null, destinationBucket, splitter); }), // Check authorization of the MPU shadow bucket. - (destinationBucket, cipherBundle, splitter, next) => { + (destinationBucket, splitter, next) => { const mpuOverviewKey = _getOverviewKey(splitter, objectKey, uploadId); return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, {}, log, @@ -194,11 +187,36 @@ function objectPutPart(authInfo, request, streamingV4Params, log, const objectLocationConstraint = res.controllingLocationConstraint; + const sseAlgo = res['x-amz-server-side-encryption']; + const sse = sseAlgo ? { + algorithm: sseAlgo, + masterKeyId: res['x-amz-server-side-encryption-aws-kms-key-id'], + } : null; return next(null, destinationBucket, objectLocationConstraint, - cipherBundle, splitter); + sse, splitter); }); }, + // Use MPU overview SSE config + (destinationBucket, objectLocationConstraint, encryption, splitter, next) => { + // If MPU has server-side encryption, pass the `res` value + if (encryption) { + return kms.createCipherBundle(encryption, log, (err, res) => { + if (err) { + log.error('error processing the cipher bundle for ' + + 'the destination bucket', { + error: err, + }); + return next(err, destinationBucket); + } + return next(null, destinationBucket, objectLocationConstraint, res, splitter); + // Allow KMS to use a key from previous provider (if sseMigration configured) + // Because ongoing MPU started before sseMigration is no migrated + }, { previousOk: true }); + } + // The MPU does not have server-side encryption, so pass `null` + return next(null, destinationBucket, 
objectLocationConstraint, null, splitter); + }, // If data backend is backend that handles mpu (like real AWS), // no need to store part info in metadata (destinationBucket, objectLocationConstraint, cipherBundle, @@ -221,6 +239,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log, return next(err, destinationBucket); } // if data backend handles mpu, skip to end of waterfall + // TODO CLDSRV-640 (artesca) data backend should return SSE to include in response headers if (partInfo && partInfo.dataStoreType === 'aws_s3') { return next(skipError, destinationBucket, partInfo.dataStoreETag); @@ -303,6 +322,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log, // Use an array to be consistent with objectPutCopyPart where there // could be multiple locations. const partLocations = [dataGetInfo]; + const sseHeaders = {}; if (cipherBundle) { const { algorithm, masterKeyId, cryptoScheme, cipheredDataKey } = cipherBundle; @@ -310,6 +330,8 @@ function objectPutPart(authInfo, request, streamingV4Params, log, partLocations[0].sseMasterKeyId = masterKeyId; partLocations[0].sseCryptoScheme = cryptoScheme; partLocations[0].sseCipheredDataKey = cipheredDataKey; + sseHeaders.algo = algorithm; + sseHeaders.kmsKey = masterKeyId; } const omVal = { // back to Version 3 since number-subparts is not needed @@ -332,14 +354,14 @@ function objectPutPart(authInfo, request, streamingV4Params, log, return next(err, destinationBucket); } return next(null, partLocations, oldLocations, objectLocationConstraint, - destinationBucket, hexDigest, prevObjectSize, splitter); + destinationBucket, hexDigest, sseHeaders, prevObjectSize, splitter); }); }, (partLocations, oldLocations, objectLocationConstraint, destinationBucket, - hexDigest, prevObjectSize, splitter, next) => { + hexDigest, sseHeaders, prevObjectSize, splitter, next) => { if (!oldLocations) { return next(null, oldLocations, objectLocationConstraint, - destinationBucket, hexDigest, prevObjectSize); + 
destinationBucket, hexDigest, sseHeaders, prevObjectSize); } return services.isCompleteMPUInProgress({ bucketName, @@ -367,13 +389,13 @@ function objectPutPart(authInfo, request, streamingV4Params, log, oldLocationsToDelete = null; } return next(null, oldLocationsToDelete, objectLocationConstraint, - destinationBucket, hexDigest, prevObjectSize); + destinationBucket, hexDigest, sseHeaders, prevObjectSize); }); }, // Clean up any old data now that new metadata (with new // data locations) has been stored. (oldLocationsToDelete, objectLocationConstraint, destinationBucket, hexDigest, - prevObjectSize, next) => { + sseHeaders, prevObjectSize, next) => { if (oldLocationsToDelete) { log.trace('overwriting mpu part, deleting data'); return data.batchDelete(oldLocationsToDelete, request.method, @@ -386,17 +408,20 @@ function objectPutPart(authInfo, request, streamingV4Params, log, { error: err }); } return next(null, destinationBucket, hexDigest, - prevObjectSize); + sseHeaders, prevObjectSize); }); } return next(null, destinationBucket, hexDigest, - prevObjectSize); + sseHeaders, prevObjectSize); }, - ], (err, destinationBucket, hexDigest, prevObjectSize) => { + ], (err, destinationBucket, hexDigest, sseHeaders, prevObjectSize) => { const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket); // eslint-disable-next-line no-param-reassign request.actionImplicitDenies = originalIdentityAuthzResults; + if (sseHeaders) { + setSSEHeaders(corsHeaders, sseHeaders.algo, sseHeaders.kmsKey); + } if (err) { if (err === skipError) { return cb(null, hexDigest, corsHeaders); diff --git a/lib/kms/file/backend.js b/lib/kms/file/backend.js index c841f9aa82..5f430b21ad 100644 --- a/lib/kms/file/backend.js +++ b/lib/kms/file/backend.js @@ -1,10 +1,14 @@ const Common = require('../common'); +const { KmsType, KmsProtocol, getKeyIdFromArn, makeBackend } = require('arsenal/build/lib/network/KMSInterface'); + +const kmsBackend = 
makeBackend(KmsType.internal, KmsProtocol.file, 'scality'); const backend = { /* * Target implementation will be async. let's mimic it */ + backend: kmsBackend, /** * @@ -19,7 +23,7 @@ const backend = { // Using createDataKey here for purposes of createBucketKeyMem // so that we do not need a separate function. const newKey = Common.createDataKey().toString('hex'); - cb(null, newKey); + cb(null, newKey, `${kmsBackend.arnPrefix}${newKey}`); }); }, @@ -43,7 +47,7 @@ const backend = { /** * * @param {number} cryptoScheme - crypto scheme version number - * @param {string} masterKeyId - master key; for the file backend + * @param {string} masterKeyIdOrArn - master key; for the file backend * the master key is the actual bucket master key rather than the key to * retrieve the actual key from a dictionary * @param {buffer} plainTextDataKey - data key @@ -53,11 +57,12 @@ const backend = { * @callback called with (err, cipheredDataKey: Buffer) */ cipherDataKey: function cipherDataKeyMem(cryptoScheme, - masterKeyId, + masterKeyIdOrArn, plainTextDataKey, log, cb) { process.nextTick(() => { + const masterKeyId = getKeyIdFromArn(masterKeyIdOrArn); const masterKey = Buffer.from(masterKeyId, 'hex'); Common.createCipher( cryptoScheme, masterKey, 0, log, @@ -84,7 +89,7 @@ const backend = { /** * * @param {number} cryptoScheme - crypto scheme version number - * @param {string} masterKeyId - master key; for the file backend + * @param {string} masterKeyIdOrArn - master key; for the file backend * the master key is the actual bucket master key rather than the key to * retrieve the actual key from a dictionary * @param {buffer} cipheredDataKey - data key @@ -94,11 +99,12 @@ const backend = { * @callback called with (err, plainTextDataKey: Buffer) */ decipherDataKey: function decipherDataKeyMem(cryptoScheme, - masterKeyId, + masterKeyIdOrArn, cipheredDataKey, log, cb) { process.nextTick(() => { + const masterKeyId = getKeyIdFromArn(masterKeyIdOrArn); const masterKey = 
Buffer.from(masterKeyId, 'hex'); Common.createDecipher( cryptoScheme, masterKey, 0, log, diff --git a/lib/kms/in_memory/backend.js b/lib/kms/in_memory/backend.js index 2a694e28b2..609ab17333 100644 --- a/lib/kms/in_memory/backend.js +++ b/lib/kms/in_memory/backend.js @@ -1,13 +1,18 @@ const Common = require('../common'); +const { KmsType, KmsProtocol, getKeyIdFromArn, makeBackend } = require('arsenal/build/lib/network/KMSInterface'); const kms = []; let count = 1; +const kmsBackend = makeBackend(KmsType.internal, KmsProtocol.mem, 'scality'); + const backend = { /* * Target implementation will be async. let's mimic it */ + backend: kmsBackend, + supportsDefaultKeyPerAccount: false, /** @@ -23,20 +28,22 @@ const backend = { // Using createDataKey here for purposes of createBucketKeyMem // so that we do not need a separate function. kms[count] = Common.createDataKey(); - cb(null, (count++).toString()); + const keyId = (count++).toString(); + cb(null, keyId, `${kmsBackend.arnPrefix}${keyId}`); }); }, /** * - * @param {string} bucketKeyId - the Id of the bucket key + * @param {string} bucketKeyIdOrArn - the Id of the bucket key * @param {object} log - logger object * @param {function} cb - callback * @returns {undefined} * @callback called with (err) */ - destroyBucketKey: function destroyBucketKeyMem(bucketKeyId, log, cb) { + destroyBucketKey: function destroyBucketKeyMem(bucketKeyIdOrArn, log, cb) { process.nextTick(() => { + const bucketKeyId = getKeyIdFromArn(bucketKeyIdOrArn); kms[bucketKeyId] = undefined; cb(null); }); @@ -45,7 +52,7 @@ const backend = { /** * * @param {number} cryptoScheme - crypto scheme version number - * @param {string} masterKeyId - key to retrieve master key + * @param {string} masterKeyIdOrArn - key to retrieve master key * @param {buffer} plainTextDataKey - data key * @param {object} log - logger object * @param {function} cb - callback @@ -53,11 +60,12 @@ const backend = { * @callback called with (err, cipheredDataKey: Buffer) */ 
cipherDataKey: function cipherDataKeyMem(cryptoScheme, - masterKeyId, + masterKeyIdOrArn, plainTextDataKey, log, cb) { process.nextTick(() => { + const masterKeyId = getKeyIdFromArn(masterKeyIdOrArn); Common.createCipher( cryptoScheme, kms[masterKeyId], 0, log, (err, cipher) => { @@ -83,7 +91,7 @@ const backend = { /** * * @param {number} cryptoScheme - crypto scheme version number - * @param {string} masterKeyId - key to retrieve master key + * @param {string} masterKeyIdOrArn - key to retrieve master key * @param {buffer} cipheredDataKey - data key * @param {object} log - logger object * @param {function} cb - callback @@ -91,11 +99,12 @@ const backend = { * @callback called with (err, plainTextDataKey: Buffer) */ decipherDataKey: function decipherDataKeyMem(cryptoScheme, - masterKeyId, + masterKeyIdOrArn, cipheredDataKey, log, cb) { process.nextTick(() => { + const masterKeyId = getKeyIdFromArn(masterKeyIdOrArn); Common.createDecipher( cryptoScheme, kms[masterKeyId], 0, log, (err, decipher) => { diff --git a/lib/kms/wrapper.js b/lib/kms/wrapper.js index 0e15a478c1..6a44175d7d 100644 --- a/lib/kms/wrapper.js +++ b/lib/kms/wrapper.js @@ -13,6 +13,13 @@ const Common = require('./common'); const vault = require('../auth/vault'); const Cache = require('./Cache'); const cache = new Cache(); +const { + KmsProtocol, + makeBackend, + isScalityKmsArn, + extractDetailFromArn, + validateKeyDetail, +} = require('arsenal/build/lib/network/KMSInterface'); function getScalityKms() { let scalityKMS; @@ -33,37 +40,132 @@ function getScalityKms() { return { scalityKMS, scalityKMSImpl }; } -let client; -let implName; - -if (config.backends.kms === 'mem') { - client = inMemory; - implName = 'memoryKms'; -} else if (config.backends.kms === 'file' || config.backends.kms === 'cdmi') { - client = file; - implName = 'fileKms'; -} else if (config.backends.kms === 'scality') { - ({ scalityKMS: client, scalityKMSImpl: implName } = getScalityKms()); -} else if (config.backends.kms === 
'kmip') { - const kmipConfig = { kmip: config.kmip }; - if (!kmipConfig.kmip) { - throw new Error('KMIP KMS driver configuration is missing.'); +const kmsFactory = { + mem: () => ({ client: inMemory, implName: 'memoryKms' }), + file: () => ({ client: file, implName: 'fileKms' }), + cdmi: () => ({ client: file, implName: 'fileKms' }), + scality: () => { + const { scalityKMS, scalityKMSImpl } = getScalityKms(); + return { client: scalityKMS, implName: scalityKMSImpl }; + }, + kmip: () => { + if (!config.kmip) { + throw new Error('KMIP KMS driver configuration is missing.'); + } + const client = Array.isArray(config.kmip.transport) + ? new KMIPClusterClient({ kmip: config.kmip }) + : new KMIPClient({ kmip: config.kmip }); + return { client, implName: 'kmip' }; + }, + aws: () => { + if (!config.kmsAWS) { + throw new Error('AWS KMS driver configuration is missing.'); + } + return { client: new KmsAWSClient({ kmsAWS: config.kmsAWS }), implName: 'aws' }; + }, +}; + +function getClient(kms) { + const impl = kmsFactory[kms]; + if (!impl) { + throw new Error(`KMS backend is not configured: ${kms}`); } - if (Array.isArray(config.kmip.transport)) { - client = new KMIPClusterClient(kmipConfig); + return impl(); +} + +/** + * Note: non current instance from previous keys won't be healthchecked + * `{ [`type:protocol:provider`]: clientDetails }` + */ +const clientInstances = {}; + +const { client, implName } = getClient(config.backends.kms); +const { type, protocol, provider } = client.backend; +const currentIdentifier = `${type}:${protocol}:${provider}`; +clientInstances[currentIdentifier] = { client, implName }; + +const availableBackends = [client.backend]; + +const mapKmsProtocolToClient = { + [KmsProtocol.aws_kms]: 'aws', + // others already match +}; + +let previousBackend; +let previousIdentifier; + +if (config.sseMigration) { + previousBackend = makeBackend( + config.sseMigration.previousKeyType, + config.sseMigration.previousKeyProtocol, + 
config.sseMigration.previousKeyProvider + ); + availableBackends.push(previousBackend); + previousIdentifier = `${previousBackend.type + }:${previousBackend.protocol + }:${previousBackend.provider}`; + + // Pre instantiate previous backend as for now only internal backend (file) is supported + // for future multiple external backend we should consider keeping open connection to + // external backend, healthcheck and idle timeout (if migration is finished) + // a config flag could help toggle this behavior + const previousKms = mapKmsProtocolToClient[previousBackend.protocol] || previousBackend.protocol; + const previousInstance = getClient(previousKms); + clientInstances[previousIdentifier] = previousInstance; +} + +/** + * Extract backend provider from key, validate arn for errors. + * @param {string} key KeyId or KeyArn + * @param {object} log logger + * @returns {object} error or client with extracted KeyId + */ +function getClientForKey(key, log) { + // if extraction only return the id, it is not a scality arnPrefix + const detail = extractDetailFromArn(key); + let clientIdentifier; + if (detail.type) { + // if type was extracted, it is a scality arnPrefix, it needs validation + // might throw if arn malformed or backend not available + // for any request (PUT or GET) + const error = validateKeyDetail(detail, availableBackends); + if (error) { + log.error('KMS key arn is invalid', { key, detail, availableBackends }); + return { error }; + } + clientIdentifier = `${detail.type}:${detail.protocol}:${detail.provider}`; + } else if (config.sseMigration) { + // if not a scality arnPrefix but migration from previous KMS + clientIdentifier = previousIdentifier; } else { - client = new KMIPClient(kmipConfig); + // if not a scality arnPrefix and no migration + clientIdentifier = currentIdentifier; } - implName = 'kmip'; -} else if (config.backends.kms === 'aws') { - const awsConfig = { kmsAWS: config.kmsAWS }; - client = new KmsAWSClient(awsConfig); - implName = 'aws'; 
-} else { - throw new Error(`KMS backend is not configured: ${config.backends.kms}`); + + const instance = clientInstances[clientIdentifier]; + + if (instance) { + // was already instantiated + // return the extracted key id to avoid further processing of potential arn + // return clientIdentifier to allow usage restriction + return { ...instance, clientIdentifier, key: detail.id }; + } + + // Only pre instantiated previous KMS from sseMigration is supported now + // Here we could instantiate other provider on the fly to manage multi providers + log.error('KMS key doesn\'t match any KMS instance', { key, detail, availableBackends }); + return { error: new errors.InvalidArgument + // eslint-disable-next-line new-cap + .customizeDescription(`KMS unknown provider for key ${key}`), + }; } class KMS { + /** Used for keys from current client */ + static get arnPrefix() { + return client.backend.arnPrefix; + } + /** * Create a new bucket encryption key. * @@ -76,9 +178,10 @@ class KMS { * @param {object} log - logger object * @param {function} cb - callback * @returns {undefined} - * @callback called with (err, { masterKeyId: string, isAccountEncryptionEnabled: boolean }) + * @callback called with (err, { masterKeyId: string, masterKeyArn: string, isAccountEncryptionEnabled: boolean }) */ static createBucketKey(bucket, log, cb) { + // always use current client for create log.debug('creating a new bucket key'); // Check if the client supports the use of a default master encryption key per account // and one is configured. 
@@ -95,17 +198,22 @@ class KMS { const { encryptionKeyId, action } = data; log.trace('default encryption key retrieved or created at the account level from vault', { implName, encryptionKeyId, action }); - return cb(null, { masterKeyId: encryptionKeyId, isAccountEncryptionEnabled: true }); + return cb(null, { + // vault only return arn + masterKeyId: encryptionKeyId, + masterKeyArn: encryptionKeyId, + isAccountEncryptionEnabled: true, + }); }); } // Otherwise, create a default master encryption key, later its id will be stored at the bucket metadata level. - return client.createBucketKey(bucket.getName(), log, (err, masterKeyId) => { + return client.createBucketKey(bucket.getName(), log, (err, masterKeyId, masterKeyArn) => { if (err) { log.debug('error from kms', { implName, error: err }); return cb(err); } log.trace('bucket key created in kms'); - return cb(null, { masterKeyId }); + return cb(null, { masterKeyId, masterKeyArn }); }); } @@ -136,7 +244,19 @@ class KMS { }; if (algorithm === 'aws:kms' && configuredMasterKeyId) { - serverSideEncryptionInfo.configuredMasterKeyId = configuredMasterKeyId; + // If input key is scality arn format it needs validation + // otherwise prepend the current KMS client arnPrefix + if (isScalityKmsArn(configuredMasterKeyId)) { + const detail = extractDetailFromArn(configuredMasterKeyId); + const error = validateKeyDetail(detail, availableBackends); + if (error) { + return cb(error); + } + serverSideEncryptionInfo.configuredMasterKeyId = configuredMasterKeyId; + } else { + serverSideEncryptionInfo.configuredMasterKeyId = + `${client.backend.arnPrefix}${configuredMasterKeyId}`; + } return process.nextTick(() => cb(null, serverSideEncryptionInfo)); } @@ -146,8 +266,8 @@ class KMS { return cb(err); } - const { masterKeyId, isAccountEncryptionEnabled } = data; - serverSideEncryptionInfo.masterKeyId = masterKeyId; + const { masterKeyId, masterKeyArn, isAccountEncryptionEnabled } = data; + serverSideEncryptionInfo.masterKeyId = 
masterKeyArn || masterKeyId; if (isAccountEncryptionEnabled) { serverSideEncryptionInfo.isAccountEncryptionEnabled = isAccountEncryptionEnabled; @@ -172,7 +292,12 @@ class KMS { */ static destroyBucketKey(bucketKeyId, log, cb) { log.debug('deleting bucket key', { bucketKeyId }); - client.destroyBucketKey(bucketKeyId, log, err => { + // shadowing global client for key + const { error, client, implName, key } = getClientForKey(bucketKeyId, log); + if (error) { + return cb(error); + } + return client.destroyBucketKey(key, log, err => { if (err) { log.debug('error from kms', { implName, error: err }); return cb(err); @@ -195,11 +320,13 @@ class KMS { * true for mandatory encryption * @param {object} log - logger object * @param {function} cb - cb from external call + * @param {object} [opts] - additional options + * @param {boolean} [opts.previousOk] - allow usage of previous KMS (for ongoing MPU not migrated) * @returns {undefined} * @callback called with (err, cipherBundle) */ static createCipherBundle(serverSideEncryptionInfo, - log, cb) { + log, cb, opts) { const { algorithm, configuredMasterKeyId, masterKeyId: bucketMasterKeyId } = serverSideEncryptionInfo; let masterKeyId = bucketMasterKeyId; @@ -207,16 +334,32 @@ class KMS { log.debug('using user configured kms master key id'); masterKeyId = configuredMasterKeyId; } + // shadowing global client for key + // but should not happen to cipher for another client as Puts should use current KMS + // still extract KeyId and validate arn + const { error, client, implName, clientIdentifier, key } = getClientForKey(masterKeyId, log); + if (error) { + return cb(error); + } + if (previousIdentifier + && clientIdentifier === previousIdentifier + && clientIdentifier !== currentIdentifier + && (opts && !opts.previousOk) + ) { + return cb(errors.InvalidArgument + .customizeDescription( + 'KMS cannot use previous provider to encrypt new objects if a new provider is configured')); + } const cipherBundle = { algorithm, - 
masterKeyId, + masterKeyId, // keep arnPrefix in cipherBundle as it is returned to callback cryptoScheme: 1, cipheredDataKey: null, cipher: null, }; - async.waterfall([ + return async.waterfall([ function generateDataKey(next) { /* There are 2 ways of generating a datakey : - using the generateDataKey of the KMS backend if it exists @@ -231,7 +374,7 @@ class KMS { if (client.generateDataKey) { log.debug('creating a data key using the KMS'); res = client.generateDataKey(cipherBundle.cryptoScheme, - cipherBundle.masterKeyId, + key, log, (err, plainTextDataKey, cipheredDataKey) => { if (err) { log.debug('error generating a new data key from KMS', @@ -247,7 +390,7 @@ class KMS { log.debug('ciphering the data key'); res = client.cipherDataKey(cipherBundle.cryptoScheme, - cipherBundle.masterKeyId, + key, plainTextDataKey, log, (err, cipheredDataKey) => { if (err) { log.debug('error encrypting the data key using KMS', @@ -320,17 +463,25 @@ class KMS { cryptoScheme: serverSideEncryptionInfo.cryptoScheme, decipher: null, }; + + // shadowing global client for key - implName already used can't be shadowed here + const { error, client, implName: _impl, key } = getClientForKey( + serverSideEncryptionInfo.masterKeyId, log); + if (error) { + return cb(error); + } + return async.waterfall([ function decipherDataKey(next) { return client.decipherDataKey( decipherBundle.cryptoScheme, - serverSideEncryptionInfo.masterKeyId, + key, serverSideEncryptionInfo.cipheredDataKey, log, (err, plainTextDataKey) => { log.debug('deciphering a data key'); if (err) { log.debug('error from kms', - { implName, error: err }); + { implName: _impl, error: err }); return next(err); } log.trace('data key deciphered by the kms'); @@ -344,7 +495,7 @@ class KMS { plainTextDataKey.fill(0); if (err) { log.debug('error from kms', - { implName, error: err }); + { implName: _impl, error: err }); return next(err); } log.trace('decipher created by the kms'); @@ -358,7 +509,7 @@ class KMS { ], (err, decipherBundle) 
=> { if (err) { log.error('error processing decipher bundle', - { implName, error: err }); + { implName: _impl, error: err }); return cb(err); } return cb(err, decipherBundle); diff --git a/lib/utilities/collectResponseHeaders.js b/lib/utilities/collectResponseHeaders.js index d61b368b42..2d9c25a877 100644 --- a/lib/utilities/collectResponseHeaders.js +++ b/lib/utilities/collectResponseHeaders.js @@ -1,7 +1,8 @@ const { getVersionIdResHeader } = require('../api/apiUtils/object/versioning'); const checkUserMetadataSize = require('../api/apiUtils/object/checkUserMetadataSize'); - +const { config } = require('../Config'); +const { getKeyIdFromArn } = require('arsenal/build/lib/network/KMSInterface'); /** * Pulls data from saved object metadata to send in response * @param {object} objectMD - object's metadata @@ -40,10 +41,11 @@ function collectResponseHeaders(objectMD, corsHeaders, versioningCfg, responseMetaHeaders['x-amz-server-side-encryption'] = objectMD['x-amz-server-side-encryption']; } - if (objectMD['x-amz-server-side-encryption-aws-kms-key-id'] && + const kmsKey = objectMD['x-amz-server-side-encryption-aws-kms-key-id']; + if (kmsKey && objectMD['x-amz-server-side-encryption'] === 'aws:kms') { responseMetaHeaders['x-amz-server-side-encryption-aws-kms-key-id'] - = objectMD['x-amz-server-side-encryption-aws-kms-key-id']; + = config.kmsHideScalityArn ? 
getKeyIdFromArn(kmsKey) : kmsKey; } responseMetaHeaders['Accept-Ranges'] = 'bytes'; diff --git a/package.json b/package.json index 1b4eddc9ce..50dcd08047 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "s3", - "version": "7.70.65", + "version": "7.70.66", "description": "S3 connector", "main": "index.js", "engines": { @@ -20,7 +20,7 @@ "homepage": "https://github.com/scality/S3#readme", "dependencies": { "@hapi/joi": "^17.1.0", - "arsenal": "git+https://github.com/scality/arsenal#7.70.43", + "arsenal": "git+https://github.com/scality/Arsenal#7.70.44", "async": "~2.5.0", "aws-sdk": "2.905.0", "azure-storage": "^2.1.0", diff --git a/tests/functional/aws-node-sdk/test/object/encryptionHeaders.js b/tests/functional/aws-node-sdk/test/object/encryptionHeaders.js index c9c4c41a14..0f5cd65342 100644 --- a/tests/functional/aws-node-sdk/test/object/encryptionHeaders.js +++ b/tests/functional/aws-node-sdk/test/object/encryptionHeaders.js @@ -6,6 +6,11 @@ const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); const kms = require('../../../../../lib/kms/wrapper'); const { DummyRequestLogger } = require('../../../../unit/helpers'); +const { config } = require('../../../../../lib/Config'); +const { getKeyIdFromArn } = require('arsenal/build/lib/network/KMSInterface'); + +// For this test env S3_CONFIG_FILE should be the same as running cloudserver +// to have the same config.kmsHideScalityArn value const log = new DummyRequestLogger(); @@ -55,7 +60,9 @@ function createExpected(sseConfig, kmsKeyId) { } if (sseConfig.masterKeyId) { - expected.masterKeyId = kmsKeyId; + expected.masterKeyId = config.kmsHideScalityArn + ? 
getKeyIdFromArn(kmsKeyId) + : kmsKeyId; } return expected; } @@ -91,7 +98,7 @@ describe('per object encryption headers', () => { const bucket = new BucketInfo('enc-bucket-test', 'OwnerId', 'OwnerDisplayName', new Date().toJSON()); kms.createBucketKey(bucket, log, - (err, { masterKeyId: keyId }) => { + (err, { masterKeyArn: keyId }) => { assert.ifError(err); kmsKeyId = keyId; done(); @@ -221,7 +228,9 @@ describe('per object encryption headers', () => { done => { const _existing = Object.assign({}, existing); if (existing.masterKeyId) { - _existing.masterKeyId = kmsKeyId; + _existing.masterKeyId = config.kmsHideScalityArn + ? getKeyIdFromArn(kmsKeyId) + : kmsKeyId; } const params = { Bucket: bucket2, diff --git a/tests/unit/api/bucketDeleteEncryption.js b/tests/unit/api/bucketDeleteEncryption.js index 5d94b49f55..13bf39bb6a 100644 --- a/tests/unit/api/bucketDeleteEncryption.js +++ b/tests/unit/api/bucketDeleteEncryption.js @@ -18,6 +18,8 @@ const bucketPutRequest = { actionImplicitDenies: false, }; +const arnPrefix = inMemory.backend.arnPrefix; + describe('bucketDeleteEncryption API', () => { before(() => cleanup()); @@ -130,7 +132,7 @@ describe('bucketDeleteEncryption API', () => { assert.strictEqual(sseInfo.mandatory, true); assert.strictEqual(sseInfo.algorithm, 'aws:kms'); assert(!sseInfo.masterKeyId); - assert.strictEqual(sseInfo.configuredMasterKeyId, keyId2); + assert.strictEqual(sseInfo.configuredMasterKeyId, `${arnPrefix}${keyId2}`); done(); }); }); @@ -156,7 +158,7 @@ describe('bucketDeleteEncryption API', () => { assert.strictEqual(sseInfo.mandatory, true); assert.strictEqual(sseInfo.algorithm, 'aws:kms'); assert.strictEqual(sseInfo.masterKeyId, expectedMasterKeyId); - assert.strictEqual(sseInfo.configuredMasterKeyId, keyId); + assert.strictEqual(sseInfo.configuredMasterKeyId, `${arnPrefix}${keyId}`); done(); }); }); diff --git a/tests/unit/api/bucketPut.js b/tests/unit/api/bucketPut.js index f64ae560f6..b4105e887e 100644 --- a/tests/unit/api/bucketPut.js 
+++ b/tests/unit/api/bucketPut.js @@ -27,6 +27,7 @@ const testRequest = { post: '', headers: { host: `${bucketName}.s3.amazonaws.com` }, }; +const arnPrefix = inMemory.backend.arnPrefix; const testChecks = [ { @@ -489,6 +490,7 @@ describe('bucketPut API with bucket-level encryption', () => { assert.strictEqual(serverSideEncryption.algorithm, 'AES256'); assert.strictEqual(serverSideEncryption.mandatory, true); assert(serverSideEncryption.masterKeyId); + assert.match(serverSideEncryption.masterKeyId, new RegExp(arnPrefix)); assert(!serverSideEncryption.isAccountEncryptionEnabled); done(); }); @@ -512,6 +514,7 @@ describe('bucketPut API with bucket-level encryption', () => { assert.strictEqual(serverSideEncryption.algorithm, 'aws:kms'); assert.strictEqual(serverSideEncryption.mandatory, true); assert(serverSideEncryption.masterKeyId); + assert.match(serverSideEncryption.masterKeyId, new RegExp(arnPrefix)); assert(!serverSideEncryption.isAccountEncryptionEnabled); done(); }); @@ -537,7 +540,7 @@ describe('bucketPut API with bucket-level encryption', () => { cryptoScheme: 1, algorithm: 'aws:kms', mandatory: true, - configuredMasterKeyId: keyId, + configuredMasterKeyId: `${arnPrefix}${keyId}`, }); done(); }); @@ -595,7 +598,7 @@ describe('bucketPut API with account level encryption', () => { cryptoScheme: 1, algorithm: 'AES256', mandatory: true, - masterKeyId: accountLevelMasterKeyId, + masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`, isAccountEncryptionEnabled: true, }); done(); @@ -620,7 +623,7 @@ describe('bucketPut API with account level encryption', () => { cryptoScheme: 1, algorithm: 'aws:kms', mandatory: true, - masterKeyId: accountLevelMasterKeyId, + masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`, isAccountEncryptionEnabled: true, }); done(); @@ -647,7 +650,7 @@ describe('bucketPut API with account level encryption', () => { cryptoScheme: 1, algorithm: 'aws:kms', mandatory: true, - configuredMasterKeyId: keyId, + configuredMasterKeyId: 
`${arnPrefix}${keyId}`, }); done(); }); @@ -753,7 +756,7 @@ describe('bucketPut API with SSE Configurations', () => { const sse = md.getServerSideEncryption(); assert.strictEqual(sse.algorithm, 'aws:kms'); assert.strictEqual(sse.mandatory, true); - assert.strictEqual(sse.configuredMasterKeyId, 'test-kms-key-id'); + assert.strictEqual(sse.configuredMasterKeyId, `${arnPrefix}test-kms-key-id`); done(); }); }); @@ -824,7 +827,7 @@ describe('bucketPut API with SSE Configurations', () => { const sse = md.getServerSideEncryption(); assert.strictEqual(sse.algorithm, 'aws:kms'); assert.strictEqual(sse.mandatory, true); - assert.strictEqual(sse.configuredMasterKeyId, 'another-kms-key-id'); + assert.strictEqual(sse.configuredMasterKeyId, `${arnPrefix}another-kms-key-id`); done(); }); }); diff --git a/tests/unit/api/bucketPutEncryption.js b/tests/unit/api/bucketPutEncryption.js index 4d42a625fa..b635973345 100644 --- a/tests/unit/api/bucketPutEncryption.js +++ b/tests/unit/api/bucketPutEncryption.js @@ -20,6 +20,7 @@ const bucketPutRequest = { url: '/', actionImplicitDenies: false, }; +const arnPrefix = inMemory.backend.arnPrefix; describe('bucketPutEncryption API', () => { let createBucketKeySpy; @@ -138,7 +139,7 @@ describe('bucketPutEncryption API', () => { mandatory: true, algorithm: 'aws:kms', cryptoScheme: 1, - configuredMasterKeyId: '12345', + configuredMasterKeyId: `${arnPrefix}12345`, }); done(); }); @@ -213,7 +214,7 @@ describe('bucketPutEncryption API', () => { algorithm: 'aws:kms', cryptoScheme: 1, masterKeyId, - configuredMasterKeyId: '12345', + configuredMasterKeyId: `${arnPrefix}12345`, }); done(); }); @@ -260,6 +261,7 @@ describe('bucketPutEncryption API', () => { assert.strictEqual(updatedSSEInfo.algorithm, 'AES256'); assert.strictEqual(updatedSSEInfo.cryptoScheme, 1); assert(updatedSSEInfo.masterKeyId); + assert(updatedSSEInfo.masterKeyId, new RegExp(arnPrefix)); done(); }); }); @@ -315,7 +317,7 @@ describe('bucketPutEncryption API with account level 
encryption', () => { cryptoScheme: 1, algorithm: 'AES256', mandatory: true, - masterKeyId: accountLevelMasterKeyId, + masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`, isAccountEncryptionEnabled: true, }); done(); @@ -334,7 +336,7 @@ describe('bucketPutEncryption API with account level encryption', () => { cryptoScheme: 1, algorithm: 'aws:kms', mandatory: true, - masterKeyId: accountLevelMasterKeyId, + masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`, isAccountEncryptionEnabled: true, }); done(); @@ -354,7 +356,7 @@ describe('bucketPutEncryption API with account level encryption', () => { cryptoScheme: 1, algorithm: 'aws:kms', mandatory: true, - configuredMasterKeyId: keyId, + configuredMasterKeyId: `${arnPrefix}${keyId}`, }); done(); }); @@ -367,7 +369,7 @@ describe('bucketPutEncryption API with account level encryption', () => { cryptoScheme: 1, algorithm: 'AES256', mandatory: true, - masterKeyId: accountLevelMasterKeyId, + masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`, isAccountEncryptionEnabled: true, }; @@ -400,7 +402,7 @@ describe('bucketPutEncryption API with account level encryption', () => { cryptoScheme: 1, algorithm: 'AES256', mandatory: true, - masterKeyId: accountLevelMasterKeyId, + masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`, isAccountEncryptionEnabled: true, }); const keyId = '12345'; @@ -413,8 +415,8 @@ describe('bucketPutEncryption API with account level encryption', () => { cryptoScheme: 1, algorithm: 'aws:kms', mandatory: true, - masterKeyId: accountLevelMasterKeyId, - configuredMasterKeyId: keyId, + masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`, + configuredMasterKeyId: `${arnPrefix}${keyId}`, isAccountEncryptionEnabled: true, }); done(); @@ -435,7 +437,7 @@ describe('bucketPutEncryption API with account level encryption', () => { cryptoScheme: 1, algorithm: 'aws:kms', mandatory: true, - masterKeyId: accountLevelMasterKeyId, + masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`, isAccountEncryptionEnabled: 
true, }); const newConf = templateSSEConfig({ algorithm: 'AES256' }); @@ -447,7 +449,7 @@ describe('bucketPutEncryption API with account level encryption', () => { cryptoScheme: 1, algorithm: 'AES256', mandatory: true, - masterKeyId: accountLevelMasterKeyId, + masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`, isAccountEncryptionEnabled: true, }); done(); @@ -469,7 +471,7 @@ describe('bucketPutEncryption API with account level encryption', () => { cryptoScheme: 1, algorithm: 'aws:kms', mandatory: true, - configuredMasterKeyId: keyId, + configuredMasterKeyId: `${arnPrefix}${keyId}`, }); const newConf = templateSSEConfig({ algorithm: 'AES256' }); return bucketPutEncryption(authInfo, templateRequest(bucketName, { post: newConf }), log, @@ -480,7 +482,7 @@ describe('bucketPutEncryption API with account level encryption', () => { cryptoScheme: 1, algorithm: 'AES256', mandatory: true, - masterKeyId: accountLevelMasterKeyId, + masterKeyId: `${arnPrefix}${accountLevelMasterKeyId}`, isAccountEncryptionEnabled: true, }); done(); diff --git a/tests/unit/encryption/kms.js b/tests/unit/encryption/kms.js index 7abd22c191..ff94a77d8a 100644 --- a/tests/unit/encryption/kms.js +++ b/tests/unit/encryption/kms.js @@ -48,7 +48,7 @@ describe('KMS unit tests', () => { assert.strictEqual(sseInfo.cryptoScheme, 1); assert.strictEqual(sseInfo.mandatory, true); assert.strictEqual(sseInfo.algorithm, 'aws:kms'); - assert.strictEqual(sseInfo.configuredMasterKeyId, masterKeyId); + assert.strictEqual(sseInfo.configuredMasterKeyId, `${KMS.arnPrefix}${masterKeyId}`); done(); }); }); diff --git a/tests/unit/testConfigs/parseKmsAWS.js b/tests/unit/testConfigs/parseKmsAWS.js index d4a327413d..bd0bc32c31 100644 --- a/tests/unit/testConfigs/parseKmsAWS.js +++ b/tests/unit/testConfigs/parseKmsAWS.js @@ -19,23 +19,24 @@ describe('parseKmsAWS Function', () => { }); it('should throw an error if endpoint is not defined in kmsAWS', () => { - const config = { kmsAWS: { ak: 'ak', sk: 'sk' } }; + const 
config = { kmsAWS: { providerName: 'tests', ak: 'ak', sk: 'sk' } }; assert.throws(() => configInstance._parseKmsAWS(config), 'endpoint must be defined'); }); it('should throw an error if ak is not defined in kmsAWS', () => { - const config = { kmsAWS: { endpoint: 'https://example.com', sk: 'sk' } }; + const config = { kmsAWS: { providerName: 'tests', endpoint: 'https://example.com', sk: 'sk' } }; assert.throws(() => configInstance._parseKmsAWS(config), 'ak must be defined'); }); it('should throw an error if sk is not defined in kmsAWS', () => { - const config = { kmsAWS: { endpoint: 'https://example.com', ak: 'ak' } }; + const config = { kmsAWS: { providerName: 'tests', endpoint: 'https://example.com', ak: 'ak' } }; assert.throws(() => configInstance._parseKmsAWS(config), 'sk must be defined'); }); it('should return the expected kmsAWS object when valid config is provided', () => { const config = { kmsAWS: { + providerName: 'tests', endpoint: 'https://example.com', ak: 'accessKey', sk: 'secretKey', @@ -43,6 +44,7 @@ describe('parseKmsAWS Function', () => { }; const result = configInstance._parseKmsAWS(config); assert.deepStrictEqual(result, { + providerName: 'tests', endpoint: 'https://example.com', ak: 'accessKey', sk: 'secretKey', @@ -52,6 +54,7 @@ describe('parseKmsAWS Function', () => { it('should include region if provided in the config', () => { const config = { kmsAWS: { + providerName: 'tests', endpoint: 'https://example.com', ak: 'accessKey', sk: 'secretKey', @@ -60,6 +63,7 @@ describe('parseKmsAWS Function', () => { }; const result = configInstance._parseKmsAWS(config); assert.deepStrictEqual(result, { + providerName: 'tests', endpoint: 'https://example.com', ak: 'accessKey', sk: 'secretKey', @@ -70,6 +74,7 @@ describe('parseKmsAWS Function', () => { it('should include tls configuration if provided', () => { const config = { kmsAWS: { + providerName: 'tests', endpoint: 'https://example.com', ak: 'accessKey', sk: 'secretKey', @@ -82,6 +87,7 @@ 
describe('parseKmsAWS Function', () => { }; const result = configInstance._parseKmsAWS(config); assert.deepStrictEqual(result, { + providerName: 'tests', endpoint: 'https://example.com', ak: 'accessKey', sk: 'secretKey', @@ -112,6 +118,7 @@ describe('parseKmsAWS TLS section', () => { it('should throw an error if tls.rejectUnauthorized is not a boolean', () => { const config = { kmsAWS: { + providerName: 'tests', endpoint: 'https://example.com', ak: 'accessKey', sk: 'secretKey', @@ -127,6 +134,7 @@ describe('parseKmsAWS TLS section', () => { it('should throw an error if tls.minVersion is not a string', () => { const config = { kmsAWS: { + providerName: 'tests', endpoint: 'https://example.com', ak: 'accessKey', sk: 'secretKey', @@ -144,6 +152,7 @@ describe('parseKmsAWS TLS section', () => { it('should throw an error if tls.maxVersion is not a string', () => { const config = { kmsAWS: { + providerName: 'tests', endpoint: 'https://example.com', ak: 'accessKey', sk: 'secretKey', @@ -161,6 +170,7 @@ describe('parseKmsAWS TLS section', () => { it('should throw an error if tls.ca is not a string or an array', () => { const config = { kmsAWS: { + providerName: 'tests', endpoint: 'https://example.com', ak: 'accessKey', sk: 'secretKey', @@ -178,6 +188,7 @@ describe('parseKmsAWS TLS section', () => { it('should return an empty tls object if all tls fields are undefined', () => { const config = { kmsAWS: { + providerName: 'tests', endpoint: 'https://example.com', ak: 'accessKey', sk: 'secretKey', @@ -192,6 +203,7 @@ describe('parseKmsAWS TLS section', () => { it('should load tls.ca as an array of files', () => { const config = { kmsAWS: { + providerName: 'tests', endpoint: 'http://example.com', ak: 'accessKey', sk: 'secretKey', @@ -212,6 +224,7 @@ describe('parseKmsAWS TLS section', () => { it('should load tls.cert as a single file', () => { const config = { kmsAWS: { + providerName: 'tests', endpoint: 'http://example.com', ak: 'accessKey', sk: 'secretKey', @@ -231,6 +244,7 @@ 
describe('parseKmsAWS TLS section', () => { it('should load tls.key as a single file', () => { const config = { kmsAWS: { + providerName: 'tests', endpoint: 'http://example.com', ak: 'accessKey', sk: 'secretKey', @@ -250,6 +264,7 @@ describe('parseKmsAWS TLS section', () => { it('should not load TLS files if tls is undefined', () => { const config = { kmsAWS: { + providerName: 'tests', endpoint: 'http://example.com', ak: 'accessKey', sk: 'secretKey', @@ -267,6 +282,7 @@ describe('parseKmsAWS TLS section', () => { const basePath = configInstance._basePath; const config = { kmsAWS: { + providerName: 'tests', endpoint: 'http://example.com', ak: 'accessKey', sk: 'secretKey', @@ -292,6 +308,7 @@ describe('parseKmsAWS TLS section', () => { const config = { kmsAWS: { + providerName: 'tests', endpoint: 'http://example.com', ak: 'accessKey', sk: 'secretKey', diff --git a/tests/unit/utils/multipleBackendGateway.js b/tests/unit/utils/multipleBackendGateway.js index c9d783b676..c62b89bff6 100644 --- a/tests/unit/utils/multipleBackendGateway.js +++ b/tests/unit/utils/multipleBackendGateway.js @@ -1,5 +1,6 @@ const assert = require('assert'); const { checkExternalBackend } = require('arsenal').storage.data.external.backendUtils; +const sinon = require('sinon'); const awsLocations = [ 'awsbackend', ]; @@ -29,10 +30,14 @@ function getClients(isSuccess) { describe('Testing _checkExternalBackend', function describeF() { this.timeout(50000); beforeEach(done => { + this.clock = sinon.useFakeTimers({ shouldAdvanceTime: true }); const clients = getClients(true); return checkExternalBackend(clients, awsLocations, 'aws_s3', false, externalBackendHealthCheckInterval, done); }); + afterEach(() => { + this.clock.restore(); + }); it('should not refresh response before externalBackendHealthCheckInterval', done => { const clients = getClients(false); @@ -59,5 +64,6 @@ describe('Testing _checkExternalBackend', function describeF() { return done(); }); }, externalBackendHealthCheckInterval + 1); 
+ this.clock.next(); // test faster }); }); diff --git a/yarn.lock b/yarn.lock index 04778bd144..ba5ca34409 100644 --- a/yarn.lock +++ b/yarn.lock @@ -156,9 +156,9 @@ integrity sha512-sXXKG+uL9IrKqViTtao2Ws6dy0znu9sOaP1di/jKGW1M6VssO8vlpXCQcpZ+jisQ1tTFAC5Jo/EOzFbggBagFQ== "@socket.io/component-emitter@~3.1.0": - version "3.1.0" - resolved "https://registry.yarnpkg.com/@socket.io/component-emitter/-/component-emitter-3.1.0.tgz#96116f2a912e0c02817345b3c10751069920d553" - integrity sha512-+9jVqKhRSpsc591z5vX+X5Yyw+he/HCB4iQ/RYxw35CEPaY1gnsNE43nf9n9AaYjAQrTiI/mOwKUKdUs9vf7Xg== + version "3.1.2" + resolved "https://registry.yarnpkg.com/@socket.io/component-emitter/-/component-emitter-3.1.2.tgz#821f8442f4175d8f0467b9daf26e3a18e2d02af2" + integrity sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA== "@tootallnate/once@1": version "1.1.2" @@ -176,16 +176,18 @@ integrity sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q== "@types/cors@^2.8.12": - version "2.8.13" - resolved "https://registry.yarnpkg.com/@types/cors/-/cors-2.8.13.tgz#b8ade22ba455a1b8cb3b5d3f35910fd204f84f94" - integrity sha512-RG8AStHlUiV5ysZQKq97copd2UmVYw3/pRMLefISZ3S1hK104Cwm7iLQ3fTKx+lsUH2CE8FlLaYeEA2LSeqYUA== + version "2.8.18" + resolved "https://registry.yarnpkg.com/@types/cors/-/cors-2.8.18.tgz#101e033b3ca06695f3d73c587cd7f9eb348135d1" + integrity sha512-nX3d0sxJW41CqQvfOzVG1NCTXfFDrDWIghCZncpHeWlVFd81zxB/DLhg7avFg6eHLCRX7ckBmoIIcqa++upvJA== dependencies: "@types/node" "*" "@types/node@*", "@types/node@>=10.0.0": - version "20.4.4" - resolved "https://registry.yarnpkg.com/@types/node/-/node-20.4.4.tgz#c79c7cc22c9d0e97a7944954c9e663bcbd92b0cb" - integrity sha512-CukZhumInROvLq3+b5gLev+vgpsIqC2D0deQr/yS1WnxvmYLlJXZpaQrQiseMY+6xusl79E04UjWoqyr+t1/Ew== + version "22.15.18" + resolved "https://registry.yarnpkg.com/@types/node/-/node-22.15.18.tgz#2f8240f7e932f571c2d45f555ba0b6c3f7a75963" + integrity 
sha512-v1DKRfUdyW+jJhZNEI1PYy29S2YRxMV5AOO/x/SjKmW0acCIOqmbj6Haf9eHAhsPmrhlHSxEhv/1WszcLWV4cg== + dependencies: + undici-types "~6.21.0" "@types/triple-beam@^1.3.2": version "1.3.2" @@ -499,9 +501,9 @@ arraybuffer.slice@~0.0.7: optionalDependencies: ioctl "^2.0.2" -"arsenal@git+https://github.com/scality/arsenal#7.70.43": - version "7.70.43" - resolved "git+https://github.com/scality/arsenal#36d49b1a722eef07f0ae75234e2d896613237381" +"arsenal@git+https://github.com/scality/Arsenal#7.70.44": + version "7.70.44" + resolved "git+https://github.com/scality/Arsenal#67ad234dd8b98530c64f0fd04c99734b979e79b2" dependencies: "@js-sdsl/ordered-set" "^4.4.2" "@types/async" "^3.2.12" @@ -1341,7 +1343,7 @@ debug@2.6.9, debug@^2.1.1, debug@^2.2.0, debug@^2.6.8, debug@~2.6.9: dependencies: ms "2.0.0" -debug@4, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.3, debug@~4.3.1, debug@~4.3.2: +debug@4, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.3: version "4.3.4" resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== @@ -1369,6 +1371,13 @@ debug@~4.1.0: dependencies: ms "^2.1.1" +debug@~4.3.1, debug@~4.3.2, debug@~4.3.4: + version "4.3.7" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.7.tgz#87945b4151a011d76d95a198d7111c865c360a52" + integrity sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ== + dependencies: + ms "^2.1.3" + deep-is@~0.1.3: version "0.1.4" resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.4.tgz#a6f2dce612fadd2ef1f519b73551f17e85199831" @@ -3854,7 +3863,7 @@ ms@2.1.2: resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== -ms@2.1.3, ms@^2.0.0, ms@^2.1.1: +ms@2.1.3, ms@^2.0.0, ms@^2.1.1, 
ms@^2.1.3: version "2.1.3" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== @@ -4829,11 +4838,12 @@ socket.io-adapter@~1.1.0: integrity sha512-WzZRUj1kUjrTIrUKpZLEzFZ1OLj5FwLlAFQs9kuZJzJi5DKdU7FsWc36SNmA8iDOtwBQyT8FkrriRM8vXLYz8g== socket.io-adapter@~2.5.2: - version "2.5.2" - resolved "https://registry.yarnpkg.com/socket.io-adapter/-/socket.io-adapter-2.5.2.tgz#5de9477c9182fdc171cd8c8364b9a8894ec75d12" - integrity sha512-87C3LO/NOMc+eMcpcxUBebGjkpMDkNBS9tf7KJqcDsmL936EChtVva71Dw2q4tQcuVC+hAUy4an2NO/sYXmwRA== + version "2.5.5" + resolved "https://registry.yarnpkg.com/socket.io-adapter/-/socket.io-adapter-2.5.5.tgz#c7a1f9c703d7756844751b6ff9abfc1780664082" + integrity sha512-eLDQas5dzPgOWCk9GuuJC2lBqItuhKI4uxGgo9aIV7MYbk2h9Q6uULEh8WBzThoI7l+qU9Ast9fVUmkqPP9wYg== dependencies: - ws "~8.11.0" + debug "~4.3.4" + ws "~8.17.1" socket.io-client@2.3.0: version "2.3.0" @@ -5448,6 +5458,11 @@ underscore@^1.12.1: resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.13.6.tgz#04786a1f589dc6c09f761fc5f45b89e935136441" integrity sha512-+A5Sja4HP1M08MaXya7p5LvjuM7K6q/2EaC0+iovj/wOcMsTzMvDFbasi/oSapiwOlt252IqsKqPjCl7huKS0A== +undici-types@~6.21.0: + version "6.21.0" + resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.21.0.tgz#691d00af3909be93a7faa13be61b3a5b50ef12cb" + integrity sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ== + unique-filename@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/unique-filename/-/unique-filename-1.1.1.tgz#1d69769369ada0583103a1e6ae87681b56573230" @@ -5770,6 +5785,11 @@ ws@~8.11.0: resolved "https://registry.yarnpkg.com/ws/-/ws-8.11.0.tgz#6a0d36b8edfd9f96d8b25683db2f8d7de6e8e143" integrity sha512-HPG3wQd9sNQoT9xHyNCXoDUa+Xw/VevmY9FoHyQ+g+rrMn4j6FB4np7Z0OhdTgjx6MgQLK7jwSy1YecU1+4Asg== +ws@~8.17.1: + version 
"8.17.1" + resolved "https://registry.yarnpkg.com/ws/-/ws-8.17.1.tgz#9293da530bb548febc95371d90f9c878727d919b" + integrity sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ== + xml2js@0.4.19: version "0.4.19" resolved "https://registry.yarnpkg.com/xml2js/-/xml2js-0.4.19.tgz#686c20f213209e94abf0d1bcf1efaa291c7827a7"