From 7c88864aa331c8d6db03ea31f837e4cf9c6716cc Mon Sep 17 00:00:00 2001
From: williamlardier
Date: Tue, 10 Dec 2024 16:00:48 +0100
Subject: [PATCH 01/10] Document backbeat routes

Issue: CLDSRV-591
---
 docs/BACKBEAT_ROUTES.md | 152 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 152 insertions(+)
 create mode 100644 docs/BACKBEAT_ROUTES.md

diff --git a/docs/BACKBEAT_ROUTES.md b/docs/BACKBEAT_ROUTES.md
new file mode 100644
index 0000000000..fc1775fe9a
--- /dev/null
+++ b/docs/BACKBEAT_ROUTES.md
@@ -0,0 +1,152 @@
+# Backbeat routes
+
+Backbeat routes are implemented in `lib/routes/routeBackbeat.js`.
+
+This special router is responsible for handling all the requests that are
+related to the Backbeat service. Backbeat may call any of the APIs below to
+perform operations on either the data or the S3 object metadata.
+
+These routes follow the same authorization and validation steps as the S3
+routes:
+
+- Authorize the request, with support for Implicit Denies from the IAM
+  service.
+- Retrieve the bucket and object metadata, if applicable.
+- Evaluate the S3 Bucket Policies and ACLs before authorizing the request.
+  - Backbeat routes are only authorized given the right permission;
+    currently, `objectReplicate` is the single permission used for all
+    these special routes.
+  - In order to be authorized without an S3 Bucket Policy, the caller must
+    be authorized by the IAM service and the ACLs. Service accounts and
+    accounts are allowed.
+- Finally, evaluate the quotas before allowing the request to proceed.
+
+## List of supported APIs
+
+```plaintext
+PUT /_/backbeat/metadata/<bucket name>/<object key>
+```
+
+To edit an existing S3 Object's metadata.
+
+```plaintext
+GET /_/backbeat/metadata/<bucket name>/<object key>?versionId=<version id>
+```
+
+To get an existing S3 Object's metadata. A version ID can be specified to
+get the metadata of a specific version.
+
+```plaintext
+PUT /_/backbeat/data/<bucket name>/<object key>
+```
+
+To put the data for an existing S3 Object directly to the storage layer.
+
+```plaintext
+PUT /_/backbeat/multiplebackenddata/<bucket name>/<object key>?operation=putobject
+```
+
+To put the data for an existing S3 Object directly to the storage layer.
+Use case: Cross Region Replication (CRR).
+
+```plaintext
+PUT /_/backbeat/multiplebackenddata/<bucket name>/<object key>?operation=putpart
+```
+
+To put the data for an existing S3 Object part directly to the storage
+layer.
+Use case: Cross Region Replication (CRR).
+
+```plaintext
+DELETE /_/backbeat/multiplebackenddata/<bucket name>/<object key>?operation=deleteobject
+```
+
+To delete the data for an existing S3 Object.
+Use case: Cross Region Replication (CRR).
+
+```plaintext
+DELETE /_/backbeat/multiplebackenddata/<bucket name>/<object key>?operation=abortmpu
+```
+
+To abort a multipart upload.
+Use case: Cross Region Replication (CRR).
+
+```plaintext
+DELETE /_/backbeat/multiplebackenddata/<bucket name>/<object key>?operation=deleteobjecttagging
+```
+
+To delete the tagging for an existing S3 Object.
+Use case: Cross Region Replication (CRR).
+
+```plaintext
+POST /_/backbeat/multiplebackenddata/<bucket name>/<object key>?operation=initiatempu
+```
+
+To initiate a multipart upload.
+Use case: Cross Region Replication (CRR).
+
+```plaintext
+POST /_/backbeat/multiplebackenddata/<bucket name>/<object key>?operation=completempu
+```
+
+To complete a multipart upload.
+Use case: Cross Region Replication (CRR).
+
+```plaintext
+POST /_/backbeat/multiplebackenddata/<bucket name>/<object key>?operation=puttagging
+```
+
+To put the tagging for an existing S3 Object.
+Use case: Cross Region Replication (CRR).
+
+```plaintext
+GET /_/backbeat/multiplebackendmetadata/<bucket name>/<object key>
+```
+
+To get the metadata for an existing S3 Object. Similar to an S3 HeadObject.
+Use case: Cross Region Replication (CRR).
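+
+For illustration, this is how one of these calls could be issued from
+Node.js, using the metadata GET route as an example. This is only a
+sketch: the host and port are assumptions, and a real deployment must
+sign the request (AWS signature) with credentials granted the
+`objectReplicate` permission, as the functional tests do through
+`makeBackbeatRequest`.
+
+```js
+const http = require('http');
+
+// Assumed local endpoint; unsigned requests are rejected with
+// AccessDenied by the authorization steps described above.
+const req = http.request({
+    host: 'localhost',
+    port: 8000,
+    method: 'GET',
+    path: '/_/backbeat/metadata/example-bucket/example-key',
+}, res => {
+    const chunks = [];
+    res.on('data', chunk => chunks.push(chunk));
+    res.on('end', () => {
+        console.log(res.statusCode, Buffer.concat(chunks).toString());
+    });
+});
+req.on('error', err => console.error(err.message));
+req.end();
+```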
+
+```plaintext
+POST /_/backbeat/batchdelete
+```
+
+To delete a batch of objects from the storage layer.
+Use case: restored S3 Object expiration.
+
+```plaintext
+GET /_/backbeat/lifecycle/<bucket name>?list-type=current
+```
+
+To list current S3 Object versions from an S3 Bucket.
+Use case: lifecycle listings.
+
+```plaintext
+GET /_/backbeat/lifecycle/<bucket name>?list-type=noncurrent
+```
+
+To list noncurrent S3 Object versions from an S3 Bucket.
+Use case: lifecycle listings.
+
+```plaintext
+GET /_/backbeat/lifecycle/<bucket name>?list-type=orphan
+```
+
+To list delete markers from an S3 Bucket.
+Use case: lifecycle listings.
+
+```plaintext
+POST /_/backbeat/index/<bucket name>?operation=add
+```
+
+To create an index for a bucket.
+Use case: MongoDB backend.
+
+```plaintext
+POST /_/backbeat/index/<bucket name>?operation=delete
+```
+
+To delete an index for a bucket.
+Use case: MongoDB backend.
+
+```plaintext
+GET /_/backbeat/index/<bucket name>
+```
+
+To get the index for a bucket.
+Use case: MongoDB backend.

From c0bb42805d894190b9056ffceab5f8e389462a9a Mon Sep 17 00:00:00 2001
From: williamlardier
Date: Tue, 10 Dec 2024 16:02:07 +0100
Subject: [PATCH 02/10] Ensure backbeat routes are authorizing requests

Today, the authorization results are not used. This is because, before
implicit denies were introduced, an AccessDenied error would be returned
directly. To be consistent, we must pass the authorization results along,
which allows the bucket policies and ACLs to be processed.

Unit tests are added to cover all existing routes. The routes themselves
are not directly tested, only the router logic: we must first add tests
for the backbeat routes before merging more logic with the normal router.

Issue: CLDSRV-591
---
 lib/api/api.js                    |  85 ++++----
 lib/routes/routeBackbeat.js       | 139 +++++++------
 lib/utilities/internalHandlers.js |   2 +-
 tests/unit/DummyRequest.js        |   8 +
 tests/unit/routeBackbeat.js       | 326 ++++++++++++++++++++++++++++++
 5 files changed, 457 insertions(+), 103 deletions(-)
 create mode 100644 tests/unit/routeBackbeat.js

diff --git a/lib/api/api.js b/lib/api/api.js
index bf0d9f65d4..5a98eb0da6 100644
--- a/lib/api/api.js
+++ b/lib/api/api.js
@@ -79,6 +79,45 @@ const monitoringMap = policies.actionMaps.actionMonitoringMapS3;
 
 auth.setHandler(vault);
 
+function checkAuthResults(authResults, apiMethod, log) {
+    let returnTagCount = true;
+    const isImplicitDeny = {};
+    let isOnlyImplicitDeny = true;
+    if (apiMethod === 'objectGet') {
+        if (!authResults[0].isAllowed && !authResults[0].isImplicit) {
+            log.trace('get object authorization denial from Vault');
+            return errors.AccessDenied;
+        }
+        isImplicitDeny[authResults[0].action] = authResults[0].isImplicit;
+        if (!authResults[1].isAllowed) {
+            log.trace('get tagging authorization denial ' +
+                'from Vault');
+            returnTagCount = false;
+        }
+    } else {
+        for (let i = 0; i < authResults.length; i++) {
+            isImplicitDeny[authResults[i].action] = true;
+            if (!authResults[i].isAllowed && !authResults[i].isImplicit) {
+                // Any explicit deny rejects the current API call
+                log.trace('authorization denial from Vault');
+                return errors.AccessDenied;
+            }
+            if (authResults[i].isAllowed) {
+                // If the action is allowed, the result is not implicit
+                // Deny.
+                isImplicitDeny[authResults[i].action] = false;
+                isOnlyImplicitDeny = false;
+            }
+        }
+    }
+    // These two APIs cannot use ACLs or Bucket Policies, hence, any
+    // implicit deny from vault must be treated as an explicit deny.
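+    // For example, an entry { action: 'objectPut', isAllowed: false,
+    // isImplicit: true } leaves isOnlyImplicitDeny as true: the request
+    // may still be allowed later by the bucket policies or ACLs, except
+    // for the two APIs below, for which an implicit deny is final.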
+    if ((apiMethod === 'bucketPut' || apiMethod === 'serviceGet') && isOnlyImplicitDeny) {
+        return errors.AccessDenied;
+    }
+    return { returnTagCount, isImplicitDeny };
+}
+
 /* eslint-disable no-param-reassign */
 const api = {
     callApiMethod(apiMethod, request, response, log, callback) {
@@ -152,49 +191,6 @@ const api = {
         // eslint-disable-next-line no-param-reassign
         request.apiMethods = apiMethods;
 
-        function checkAuthResults(authResults) {
-            let returnTagCount = true;
-            const isImplicitDeny = {};
-            let isOnlyImplicitDeny = true;
-            if (apiMethod === 'objectGet') {
-                // first item checks s3:GetObject(Version) action
-                if (!authResults[0].isAllowed && !authResults[0].isImplicit) {
-                    log.trace('get object authorization denial from Vault');
-                    return errors.AccessDenied;
-                }
-                // TODO add support for returnTagCount in the bucket policy
-                // checks
-                isImplicitDeny[authResults[0].action] = authResults[0].isImplicit;
-                // second item checks s3:GetObject(Version)Tagging action
-                if (!authResults[1].isAllowed) {
-                    log.trace('get tagging authorization denial ' +
-                        'from Vault');
-                    returnTagCount = false;
-                }
-            } else {
-                for (let i = 0; i < authResults.length; i++) {
-                    isImplicitDeny[authResults[i].action] = true;
-                    if (!authResults[i].isAllowed && !authResults[i].isImplicit) {
-                        // Any explicit deny rejects the current API call
-                        log.trace('authorization denial from Vault');
-                        return errors.AccessDenied;
-                    }
-                    if (authResults[i].isAllowed) {
-                        // If the action is allowed, the result is not implicit
-                        // Deny.
-                        isImplicitDeny[authResults[i].action] = false;
-                        isOnlyImplicitDeny = false;
-                    }
-                }
-            }
-            // These two APIs cannot use ACLs or Bucket Policies, hence, any
-            // implicit deny from vault must be treated as an explicit deny.
-            if ((apiMethod === 'bucketPut' || apiMethod === 'serviceGet') && isOnlyImplicitDeny) {
-                return errors.AccessDenied;
-            }
-            return { returnTagCount, isImplicitDeny };
-        }
-
         return async.waterfall([
             next => auth.server.doAuth(
                 request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => {
@@ -273,7 +269,7 @@ const api = {
             }
             request.accountQuotas = infos?.accountQuota;
             if (authorizationResults) {
-                const checkedResults = checkAuthResults(authorizationResults);
+                const checkedResults = checkAuthResults(authorizationResults, apiMethod, log);
                 if (checkedResults instanceof Error) {
                     return callback(checkedResults);
                 }
@@ -372,6 +368,7 @@ const api = {
     serviceGet,
     websiteGet: website,
     websiteHead: website,
+    checkAuthResults,
 };
 
 module.exports = api;
diff --git a/lib/routes/routeBackbeat.js b/lib/routes/routeBackbeat.js
index d1b6743a9a..7327a395df 100644
--- a/lib/routes/routeBackbeat.js
+++ b/lib/routes/routeBackbeat.js
@@ -39,6 +39,7 @@ const { listLifecycleNonCurrents } = require('../api/backbeat/listLifecycleNonCu
 const { listLifecycleOrphanDeleteMarkers } = require('../api/backbeat/listLifecycleOrphanDeleteMarkers');
 const { objectDeleteInternal } = require('../api/objectDelete');
 const { validateQuotas } = require('../api/apiUtils/quotas/quotaUtils');
+const { checkAuthResults } = require('../api/api');
 
 const { CURRENT_TYPE, NON_CURRENT_TYPE, ORPHAN_DM_TYPE } = constants.lifecycleListing;
@@ -315,36 +316,6 @@ function handleTaggingOperation(request, response, type, dataStoreVersionId,
     });
 }
 
-/*
-PUT /_/backbeat/metadata/<bucket name>/<object key>
-GET /_/backbeat/metadata/<bucket name>/<object key>?versionId=<version id>
-PUT /_/backbeat/data/<bucket name>/<object key>
-PUT /_/backbeat/multiplebackenddata/<bucket name>/<object key>
-    ?operation=putobject
-PUT /_/backbeat/multiplebackenddata/<bucket name>/<object key>
-    ?operation=putpart
-DELETE /_/backbeat/multiplebackenddata/<bucket name>/<object key>
-    ?operation=deleteobject
-DELETE /_/backbeat/multiplebackenddata/<bucket name>/<object key>
-    ?operation=abortmpu
-DELETE /_/backbeat/multiplebackenddata/<bucket name>/<object key>
-    ?operation=deleteobjecttagging
-POST /_/backbeat/multiplebackenddata/<bucket name>/<object key>
-    ?operation=initiatempu
-POST /_/backbeat/multiplebackenddata/<bucket name>/<object key>
-    ?operation=completempu
-POST /_/backbeat/multiplebackenddata/<bucket name>/<object key>
-    ?operation=puttagging
-GET /_/backbeat/multiplebackendmetadata/<bucket name>/<object key>
-POST /_/backbeat/batchdelete
-GET /_/backbeat/lifecycle/<bucket name>?list-type=current
-GET /_/backbeat/lifecycle/<bucket name>?list-type=noncurrent
-GET /_/backbeat/lifecycle/<bucket name>?list-type=orphan
-POST /_/backbeat/index/<bucket name>?operation=add
-POST /_/backbeat/index/<bucket name>?operation=delete
-DELETE /_/backbeat/index/<bucket name>
-*/
-
 function _getLastModified(locations, log, cb) {
     const reqUids = log.getSerializedUids();
     return dataClient.head(locations, reqUids, (err, data) => {
@@ -1336,19 +1307,20 @@ const indexEntrySchema = joi.object({
 
 const indexingSchema = joi.array().items(indexEntrySchema).min(1);
 
-function routeIndexingAPIs(request, response, userInfo, log) {
+function routeIndexingAPIs(request, response, userInfo, log, callback) {
     const route = backbeatRoutes[request.method][request.resourceType];
 
     if (!['GET', 'POST'].includes(request.method)) {
-        return responseJSONBody(errors.MethodNotAllowed, null, response, log);
+        responseJSONBody(errors.MethodNotAllowed, null, response, log);
+        return callback(errors.MethodNotAllowed);
     }
 
     if (request.method === 'GET') {
         return route(request, response, userInfo, log, err => {
             if (err) {
-                return responseJSONBody(err, null, response, log);
+                responseJSONBody(err, null, response, log);
             }
-            return undefined;
+            return callback(err);
         });
     }
 
     const op = request.query.operation;
 
     if (!op || typeof route[op] !== 'function') {
         log.error('Invalid operataion parameter', { operation: op });
-        return responseJSONBody(errors.BadRequest, null, response, log);
+        responseJSONBody(errors.BadRequest, null, response, log);
+        return callback(errors.BadRequest);
     }
 
     return _getRequestPayload(request, (err, payload) => {
         if (err) {
-            return responseJSONBody(err, null, response, log);
+            responseJSONBody(err, null, response, log);
+            return callback(err);
         }
 
         let parsedIndex;
 
         try {
             parsedIndex = joi.attempt(JSON.parse(payload), indexingSchema, 'invalid payload');
         } catch (err) {
             log.error('Unable to parse index request body', { error: err });
-            return responseJSONBody(errors.BadRequest, null, response, log);
+            responseJSONBody(errors.BadRequest, null, response, log);
+            return callback(errors.BadRequest);
         }
 
         return route[op](parsedIndex, request, response, userInfo, log, err => {
             if (err) {
-                return responseJSONBody(err, null, response, log);
+                responseJSONBody(err, null, response, log);
             }
-            return undefined;
+            return callback(err);
         });
     });
 }
 
-
-function routeBackbeat(clientIP, request, response, log) {
+function routeBackbeat(clientIP, request, response, log, callback) {
     // Attach the apiMethod method to the request, so it can used by monitoring in the server
     // eslint-disable-next-line no-param-reassign
     request.apiMethod = 'routeBackbeat';
@@ -1411,14 +1385,20 @@ function routeBackbeat(clientIP, request, response, log) {
     // eslint-disable-next-line no-param-reassign
     request.finalizerHooks = [];
 
+    // Extract all the _apiMethods and store them in an array
+    const apiMethods = requestContexts ?
requestContexts.map(context => context._apiMethod) : []; + // Attach the names to the current request + // eslint-disable-next-line no-param-reassign + request.apiMethods = apiMethods; + // proxy api requests to Backbeat API server if (request.resourceType === 'api') { if (!config.backbeat) { log.debug('unable to proxy backbeat api request', { backbeatConfig: config.backbeat, }); - return responseJSONBody(errors.MethodNotAllowed, null, response, - log); + responseJSONBody(errors.MethodNotAllowed, null, response, log); + return callback(errors.MethodNotAllowed); } const path = request.url.replace('/_/backbeat/api', '/_/'); const { host, port } = config.backbeat; @@ -1433,10 +1413,30 @@ function routeBackbeat(clientIP, request, response, log) { bucketName: request.bucketName, objectKey: request.objectKey, }); - return responseJSONBody(err, null, response, log); + responseJSONBody(err, null, response, log); + return callback(err); } // eslint-disable-next-line no-param-reassign request.accountQuotas = infos?.accountQuota; + if (authorizationResults) { + const checkedResults = checkAuthResults(authorizationResults, apiMethods[0], log); + if (checkedResults instanceof Error) { + responseJSONBody(errors.AccessDenied, null, response, log); + return callback(errors.AccessDenied); + } + // eslint-disable-next-line no-param-reassign + request.actionImplicitDenies = checkedResults.isImplicitDeny; + } else { + // create an object of keys apiMethods with all values to false: + // for backward compatibility, all apiMethods are allowed by default + // thus it is explicitly allowed, so implicit deny is false + // eslint-disable-next-line no-param-reassign + request.actionImplicitDenies = apiMethods.reduce((acc, curr) => { + // eslint-disable-next-line no-param-reassign + acc[curr] = false; + return acc; + }, {}); + } // FIXME for now, any authenticated user can access API // routes. 
We should introduce admin accounts or accounts // with admin privileges, and restrict access to those @@ -1447,14 +1447,14 @@ function routeBackbeat(clientIP, request, response, log) { bucketName: request.bucketName, objectKey: request.objectKey, }); - return responseJSONBody( - errors.AccessDenied, null, response, log); + responseJSONBody(errors.AccessDenied, null, response, log); + return callback(errors.AccessDenied); } return backbeatProxy.web(request, response, { target }, err => { log.error('error proxying request to api server', - { error: err.message }); - return responseJSONBody(errors.ServiceUnavailable, null, - response, log); + { error: err.message }); + responseJSONBody(errors.ServiceUnavailable, null, response, log); + return callback(errors.ServiceUnavailable); }); }, 's3', requestContexts); } @@ -1485,7 +1485,8 @@ function routeBackbeat(clientIP, request, response, log) { resourceType: request.resourceType, query: request.query, }); - return responseJSONBody(errors.MethodNotAllowed, null, response, log); + responseJSONBody(errors.MethodNotAllowed, null, response, log); + return callback(errors.MethodNotAllowed); } return async.waterfall([next => auth.server.doAuth( @@ -1501,6 +1502,24 @@ function routeBackbeat(clientIP, request, response, log) { } // eslint-disable-next-line no-param-reassign request.accountQuotas = infos?.accountQuota; + if (authorizationResults) { + const checkedResults = checkAuthResults(authorizationResults, apiMethods[0], log); + if (checkedResults instanceof Error) { + return callback(errors.MethodNotAllowed); + } + // eslint-disable-next-line no-param-reassign + request.actionImplicitDenies = checkedResults.isImplicitDeny; + } else { + // create an object of keys apiMethods with all values to false: + // for backward compatibility, all apiMethods are allowed by default + // thus it is explicitly allowed, so implicit deny is false + // eslint-disable-next-line no-param-reassign + request.actionImplicitDenies = apiMethods.reduce((acc, curr) => { + // eslint-disable-next-line no-param-reassign + acc[curr] = false; + return acc; + }, {}); + } return next(err, userInfo); }, 's3', requestContexts), (userInfo, next) => { @@ -1517,15 +1536,15 @@ function routeBackbeat(clientIP, request, response, log) { } if (request.resourceType === 'index') { - return routeIndexingAPIs(request, response, userInfo, log); + return routeIndexingAPIs(request, response, userInfo, log, callback); } const route = backbeatRoutes[request.method][request.resourceType]; return route(request, response, userInfo, log, err => { if (err) { - return responseJSONBody(err, null, response, log); + responseJSONBody(err, null, response, log); } - return undefined; + return callback(err); }); } @@ -1561,8 +1580,8 @@ function routeBackbeat(clientIP, request, response, log) { return backbeatRoutes[request.method][request.resourceType]( request, response, log, next); } - return backbeatRoutes[request.method][request.resourceType] - [request.query.operation](request, response, log, next); + return backbeatRoutes[request.method][request.resourceType][request.query.operation]( + request, response, log, next); }], err => async.forEachLimit( // Finalizer hooks are used in a quota context and ensure consistent @@ -1573,16 +1592,20 @@ function routeBackbeat(clientIP, request, response, log) { (hook, done) => hook(err, done), () => { if (err) { - return responseJSONBody(err, null, response, log); + responseJSONBody(err, null, response, log); + return callback(err); } log.debug('backbeat route response 
sent successfully', { method: request.method, bucketName: request.bucketName, objectKey: request.objectKey }); - return undefined; + return callback(); }, )); } -module.exports = routeBackbeat; +module.exports = { + backbeatRoutes, + routeBackbeat, +}; diff --git a/lib/utilities/internalHandlers.js b/lib/utilities/internalHandlers.js index 96af32bc05..0d6537946b 100644 --- a/lib/utilities/internalHandlers.js +++ b/lib/utilities/internalHandlers.js @@ -1,4 +1,4 @@ -const routeBackbeat = require('../routes/routeBackbeat'); +const { routeBackbeat } = require('../routes/routeBackbeat'); const routeMetadata = require('../routes/routeMetadata'); const routeWorkflowEngineOperator = require('../routes/routeWorkflowEngineOperator'); diff --git a/tests/unit/DummyRequest.js b/tests/unit/DummyRequest.js index 28b21337eb..345927db83 100644 --- a/tests/unit/DummyRequest.js +++ b/tests/unit/DummyRequest.js @@ -25,6 +25,14 @@ class DummyRequest extends http.IncomingMessage { this.push(msg); } this.push(null); + if (!this.socket) { + this.socket = { + remoteAddress: '127.0.0.1', + destroy: () => {}, + on: () => {}, + removeListener: () => {}, + }; + } } } diff --git a/tests/unit/routeBackbeat.js b/tests/unit/routeBackbeat.js new file mode 100644 index 0000000000..42e57c6909 --- /dev/null +++ b/tests/unit/routeBackbeat.js @@ -0,0 +1,326 @@ +const sinon = require('sinon'); +const async = require('async'); +const assert = require('assert'); +const DummyRequest = require('./DummyRequest'); +const { routeBackbeat, backbeatRoutes } = require('../../lib/routes/routeBackbeat'); +const { bucketPut } = require('../../lib/api/bucketPut'); +const { makeAuthInfo, versioningTestUtils, DummyRequestLogger } = require('./helpers'); +const objectPut = require('../../lib/api/objectPut'); +const { auth, errors } = require('arsenal'); +const { default: AuthInfo } = require('arsenal/build/lib/auth/AuthInfo'); +const bucketPutVersioning = require('../../lib/api/bucketPutVersioning'); + +const log = new DummyRequestLogger(); +const bucketName = 'bucketname'; +const canonicalID = 'accessKey1'; +const authInfo = makeAuthInfo(canonicalID); +const namespace = 'default'; +const objectName = 'objectName'; +const postBody = Buffer.from('I am a body', 'utf8'); + +const testBucket = { + bucketName, + namespace, + headers: { + 'host': `${bucketName}.s3.amazonaws.com`, + // set versioning + + }, + url: `/${bucketName}`, + actionImplicitDenies: false, +}; + +const testObject = new DummyRequest({ + bucketName, + namespace, + objectKey: objectName, + headers: { + 'x-amz-meta-test': 'some metadata', + 'content-length': '12', + }, + parsedContentLength: 12, + url: `/${bucketName}/${objectName}`, +}, postBody); + +describe('routeBackbeat', () => { + let request; + let response; + + beforeEach(() => { + // Mock backbeatRoutes + sinon.stub(backbeatRoutes, 'PUT').returns({ + data: sinon.stub(), + metadata: sinon.stub(), + multiplebackenddata: { + putobject: sinon.stub(), + putpart: sinon.stub(), + }, + }); + + sinon.stub(backbeatRoutes, 'POST').returns({ + multiplebackenddata: { + initiatempu: sinon.stub(), + completempu: sinon.stub(), + puttagging: sinon.stub(), + }, + batchdelete: sinon.stub(), + index: { + add: sinon.stub(), + delete: sinon.stub(), + }, + }); + + sinon.stub(backbeatRoutes, 'DELETE').returns({ + expiration: sinon.stub(), + multiplebackenddata: { + deleteobject: sinon.stub(), + deleteobjecttagging: sinon.stub(), + abortmpu: sinon.stub(), + }, + }); + + sinon.stub(backbeatRoutes, 'GET').returns({ + metadata: sinon.stub(), + 
multiplebackendmetadata: sinon.stub(), + lifecycle: sinon.stub(), + index: sinon.stub(), + }); + + // Mock request and response + request = new DummyRequest( + { + method: 'GET', + headers: { 'content-length': '123' }, + url: '/_/backbeat/multiplebackendmetadata/bucketName/objectKey?operation=putobject', + }, + 'body' + ); + response = { + setHeader: sinon.stub(), + writeHead: sinon.stub(), + end: sinon.stub().callsFake((body, format, cb) => cb()), + }; + }); + + afterEach(() => { + sinon.restore(); + }); + + it('should reject if the request is invalid', done => { + request.url = '/_/backbeat//bucketName/objectKey?operation=putobject'; + // Cover the case invalidRequest === true + routeBackbeat('127.0.0.1', request, response, log, err => { + assert(err.is.MethodNotAllowed); + done(); + }); + }); + + it('should reject if the route is invalid', done => { + request.url = '/_/backbeat/wrong/bucketName/objectKey?operation=putobject'; + // Cover the case invalidRoute === true + routeBackbeat('127.0.0.1', request, response, log, err => { + assert(err.is.MethodNotAllowed); + done(); + }); + }); + + [ + { + method: 'PUT', + resourceType: 'metadata', + target: `${bucketName}/${objectName}`, + operation: null, + versionId: false, + // not sending any body here, so expect error + expect: errors.MalformedPOSTRequest, + }, + { + method: 'GET', + resourceType: 'metadata', + target: `${bucketName}/${objectName}`, + operation: null, + versionId: true, + }, + { + method: 'PUT', + resourceType: 'data', + target: `${bucketName}/${objectName}`, + operation: null, + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'PUT', + resourceType: 'multiplebackenddata', + target: `${bucketName}/${objectName}`, + operation: 'putobject', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'PUT', + resourceType: 'multiplebackenddata', + target: `${bucketName}/${objectName}`, + operation: 'putpart', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'DELETE', + resourceType: 'multiplebackenddata', + target: `${bucketName}/${objectName}`, + operation: 'deleteobject', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'DELETE', + resourceType: 'multiplebackenddata', + target: `${bucketName}/${objectName}`, + operation: 'abortmpu', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'DELETE', + resourceType: 'multiplebackenddata', + target: `${bucketName}/${objectName}`, + operation: 'deleteobjecttagging', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'POST', + resourceType: 'multiplebackenddata', + target: `${bucketName}/${objectName}`, + operation: 'initiatempu', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'POST', + resourceType: 'multiplebackenddata', + target: `${bucketName}/${objectName}`, + operation: 'completempu', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'POST', + resourceType: 'multiplebackenddata', + target: `${bucketName}/${objectName}`, + operation: 'puttagging', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'GET', + resourceType: 'multiplebackendmetadata', + target: `${bucketName}/${objectName}`, + operation: null, + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'POST', + resourceType: 'batchdelete', + target: null, + operation: null, + versionId: false, + expect: errors.MalformedPOSTRequest, + }, + { + method: 'GET', + resourceType: 'lifecycle', + target: `${bucketName}?list-type=wrong`, + operation: null, + versionId: 
false, + expect: errors.BadRequest, + }, + { + method: 'POST', + resourceType: 'index', + target: bucketName, + operation: 'add', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'POST', + resourceType: 'index', + target: bucketName, + operation: 'delete', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'GET', + resourceType: 'index', + target: null, + operation: 'delete', + versionId: false, + expect: errors.NotImplemented, + } + ].forEach(testCase => { + it(`should call method ${testCase.method} ${testCase.resourceType}`, done => { + let hasQuery = false; + let versionIdParsed = null; + request.method = testCase.method; + request.url = `/_/backbeat/${testCase.resourceType}/${testCase.target}`; + if (testCase.operation) { + request.url += `?operation=${testCase.operation}`; + hasQuery = true; + } + + // Mock auth server to ignore auth in this test + sinon.stub(auth.server, 'doAuth').callsFake((req, log, cb) => + cb(null, new AuthInfo({ + canonicalID: 'abcdef/lifecycle', + accountDisplayName: 'Lifecycle Service Account', + }), undefined, undefined, { + accountQuota: 1000, + }) + ); + + const enableVersioningRequest = + versioningTestUtils.createBucketPutVersioningReq(bucketName, 'Enabled'); + + return async.series([ + next => bucketPut(authInfo, testBucket, log, next), + next => bucketPutVersioning(authInfo, enableVersioningRequest, log, next), + next => objectPut(authInfo, testObject, undefined, log, (err, res) => { + versionIdParsed = res['x-amz-version-id']; + if (testCase.versionId) { + request.url += `${(hasQuery ? '&' : '?')}&versionId=${versionIdParsed}`; + } + next(err); + }), + next => routeBackbeat('127.0.0.1', request, response, log, next), + ], err => { + if (testCase.expect) { + assert.strictEqual(err.code, testCase.expect.code); + return done(); + } + assert.ifError(err); + assert.strictEqual(Array.isArray(request.finalizerHooks), true); + assert.strictEqual(request.apiMethods[0], 'objectReplicate'); + assert.strictEqual(request.apiMethods.length, 1); + assert.strictEqual(request.accountQuotas, 1000); + return done(); + }); + }); + }); + + // Although the authz result is by default an implicit deny, the + // ACL should prevent any further processing for non-service or + // non-account identities. + it('should return access denied if doAuth returns an error', done => { + request.method = 'PUT'; + request.url = `/_/backbeat/metadata/${bucketName}/${objectName}?operation=putobject`; + + routeBackbeat('127.0.0.1', request, response, log, err => { + assert(err.is.AccessDenied); + done(); + }); + }); +}); From decfc8ff230393121fadcdb96384e1266dc529fd Mon Sep 17 00:00:00 2001 From: williamlardier Date: Tue, 10 Dec 2024 16:50:43 +0100 Subject: [PATCH 03/10] Unify implicit deny handling in normal and backbeat routes - Also split backbeat routers - Better use the callback functions - Do not return twice to the client in case of error and quota evaluation (finalizer hooks) - Remove account quota from backbeat proxy route: as not used in this case. 
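
In effect, both routers now delegate to the same helper. A sketch of a
call site, mirroring the diff below (the helper fails the callback when
the request is denied, and otherwise records
request.actionImplicitDenies for the later bucket policy and ACL
evaluation):

    handleAuthorizationResults(request, authorizationResults, apiMethod,
        returnTagCount, log, err => {
            if (err) {
                // denied by Vault (explicit deny, or implicit-only deny
                // for bucketPut/serviceGet)
                return callback(err);
            }
            // request.actionImplicitDenies is now populated
            return callback();
        });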
Issue: CLDSRV-591 --- lib/api/api.js | 46 +++-- lib/routes/routeBackbeat.js | 332 +++++++++++++++++------------------- tests/unit/routeBackbeat.js | 5 - 3 files changed, 184 insertions(+), 199 deletions(-) diff --git a/lib/api/api.js b/lib/api/api.js index 5a98eb0da6..bb9390a042 100644 --- a/lib/api/api.js +++ b/lib/api/api.js @@ -119,6 +119,26 @@ function checkAuthResults(authResults, apiMethod, log) { } /* eslint-disable no-param-reassign */ +function handleAuthorizationResults(request, authorizationResults, apiMethod, returnTagCount, log, callback) { + if (authorizationResults) { + const checkedResults = checkAuthResults(authorizationResults, apiMethod, log); + if (checkedResults instanceof Error) { + return callback(checkedResults); + } + returnTagCount = checkedResults.returnTagCount; + request.actionImplicitDenies = checkedResults.isImplicitDeny; + } else { + // create an object of keys apiMethods with all values to false: + // for backward compatibility, all apiMethods are allowed by default + // thus it is explicitly allowed, so implicit deny is false + request.actionImplicitDenies = request.apiMethods.reduce((acc, curr) => { + acc[curr] = false; + return acc; + }, {}); + } + return callback(); +} + const api = { callApiMethod(apiMethod, request, response, log, callback) { // Attach the apiMethod method to the request, so it can used by monitoring in the server @@ -148,7 +168,7 @@ const api = { objectKey: request.objectKey, }); } - let returnTagCount = true; + const returnTagCount = true; const validationRes = validateQueryAndHeaders(request, log); if (validationRes.error) { @@ -263,27 +283,18 @@ const api = { return next(null, userInfo, authResultsWithTags, streamingV4Params, infos); }, ), + (userInfo, authorizationResults, streamingV4Params, infos, next) => + handleAuthorizationResults(request, authorizationResults, apiMethod, returnTagCount, log, err => { + if (err) { + return next(err); + } + return next(null, userInfo, authorizationResults, streamingV4Params, infos); + }), ], (err, userInfo, authorizationResults, streamingV4Params, infos) => { if (err) { return callback(err); } request.accountQuotas = infos?.accountQuota; - if (authorizationResults) { - const checkedResults = checkAuthResults(authorizationResults, apiMethod, log); - if (checkedResults instanceof Error) { - return callback(checkedResults); - } - returnTagCount = checkedResults.returnTagCount; - request.actionImplicitDenies = checkedResults.isImplicitDeny; - } else { - // create an object of keys apiMethods with all values to false: - // for backward compatibility, all apiMethods are allowed by default - // thus it is explicitly allowed, so implicit deny is false - request.actionImplicitDenies = apiMethods.reduce((acc, curr) => { - acc[curr] = false; - return acc; - }, {}); - } const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5, (hook, done) => hook(err, done), () => callback(err, ...results)); @@ -369,6 +380,7 @@ const api = { websiteGet: website, websiteHead: website, checkAuthResults, + handleAuthorizationResults, }; module.exports = api; diff --git a/lib/routes/routeBackbeat.js b/lib/routes/routeBackbeat.js index 7327a395df..462d75425a 100644 --- a/lib/routes/routeBackbeat.js +++ b/lib/routes/routeBackbeat.js @@ -23,10 +23,10 @@ const locationStorageCheck = require('../api/apiUtils/object/locationStorageCheck'); const { dataStore } = require('../api/apiUtils/object/storeObject'); const prepareRequestContexts = require( 
-'../api/apiUtils/authorization/prepareRequestContexts'); + '../api/apiUtils/authorization/prepareRequestContexts'); const { decodeVersionId } = require('../api/apiUtils/object/versioning'); const locationKeysHaveChanged - = require('../api/apiUtils/object/locationKeysHaveChanged'); + = require('../api/apiUtils/object/locationKeysHaveChanged'); const { standardMetadataValidateBucketAndObj, metadataGetObject } = require('../metadata/metadataUtils'); const { config } = require('../Config'); @@ -39,7 +39,7 @@ const { listLifecycleNonCurrents } = require('../api/backbeat/listLifecycleNonCu const { listLifecycleOrphanDeleteMarkers } = require('../api/backbeat/listLifecycleOrphanDeleteMarkers'); const { objectDeleteInternal } = require('../api/objectDelete'); const { validateQuotas } = require('../api/apiUtils/quotas/quotaUtils'); -const { checkAuthResults } = require('../api/api'); +const { handleAuthorizationResults } = require('../api/api'); const { CURRENT_TYPE, NON_CURRENT_TYPE, ORPHAN_DM_TYPE } = constants.lifecycleListing; @@ -128,7 +128,7 @@ function _getRequestPayload(req, cb) { payload.push(chunk); payloadLen += chunk.length; }).on('error', cb) - .on('end', () => cb(null, Buffer.concat(payload, payloadLen).toString())); + .on('end', () => cb(null, Buffer.concat(payload, payloadLen).toString())); } function _checkMultipleBackendRequest(request, log) { @@ -195,7 +195,7 @@ function _checkMultipleBackendRequest(request, log) { const location = locationConstraints[headers['x-scal-storage-class']]; const storageTypeList = storageType.split(','); const isValidLocation = location && - storageTypeList.includes(location.type); + storageTypeList.includes(location.type); if (!isValidLocation) { errMessage = 'invalid request: invalid location constraint in request'; log.debug(errMessage, { @@ -301,19 +301,19 @@ function handleTaggingOperation(request, response, type, dataStoreVersionId, } } return dataClient.objectTagging(type, request.objectKey, - request.bucketName, objectMD, log, err => { - if (err) { - log.error(`error during object tagging: ${type}`, { - error: err, - method: 'handleTaggingOperation', - }); - return callback(err); - } - const dataRetrievalInfo = { - versionId: dataStoreVersionId, - }; - return _respond(response, dataRetrievalInfo, log, callback); - }); + request.bucketName, objectMD, log, err => { + if (err) { + log.error(`error during object tagging: ${type}`, { + error: err, + method: 'handleTaggingOperation', + }); + return callback(err); + } + const dataRetrievalInfo = { + versionId: dataStoreVersionId, + }; + return _respond(response, dataRetrievalInfo, log, callback); + }); } function _getLastModified(locations, log, cb) { @@ -557,7 +557,8 @@ function putMetadata(request, response, bucketInfo, objMd, log, callback) { } log.trace('putting object version', { - objectKey: request.objectKey, omVal, options }); + objectKey: request.objectKey, omVal, options + }); return metadata.putObjectMD(bucketName, objectKey, omVal, options, log, (err, md) => { if (err) { @@ -582,26 +583,26 @@ function putMetadata(request, response, bucketInfo, objMd, log, callback) { objectKey, }); async.eachLimit(objMd.location, 5, - (loc, next) => dataWrapper.data.delete(loc, log, err => { - if (err) { - log.warn('error removing old data location key', { + (loc, next) => dataWrapper.data.delete(loc, log, err => { + if (err) { + log.warn('error removing old data location key', { + bucketName, + objectKey, + locationKey: loc, + error: err.message, + }); + } + // do not forward the error to let other + // 
locations be deleted + next(); + }), + () => { + log.debug('done removing old data locations', { + method: 'putMetadata', bucketName, objectKey, - locationKey: loc, - error: err.message, }); - } - // do not forward the error to let other - // locations be deleted - next(); - }), - () => { - log.debug('done removing old data locations', { - method: 'putMetadata', - bucketName, - objectKey, }); - }); } return _respond(response, md, log, callback); }); @@ -910,7 +911,7 @@ function completeMultipartUpload(request, response, log, callback) { // lib/api/completeMultipartUpload.js. const { key, dataStoreType, dataStoreVersionId } = - retrievalInfo; + retrievalInfo; let size; let dataStoreETag; if (skipMpuPartProcessing(retrievalInfo)) { @@ -918,7 +919,7 @@ function completeMultipartUpload(request, response, log, callback) { dataStoreETag = retrievalInfo.eTag; } else { const { aggregateSize, aggregateETag } = - generateMpuAggregateInfo(parts); + generateMpuAggregateInfo(parts); size = aggregateSize; dataStoreETag = aggregateETag; } @@ -1307,35 +1308,39 @@ const indexEntrySchema = joi.object({ const indexingSchema = joi.array().items(indexEntrySchema).min(1); +function respondToRequest(err, response, log, callback) { + responseJSONBody(err, null, response, log); + // The callback is optional, as it is only used for testing purposes + // but value may be set to non-undefined or null due to the arsenal + // routes implementation + if (callback && typeof callback === 'function') { + return callback(err); + } + return undefined; +} + function routeIndexingAPIs(request, response, userInfo, log, callback) { const route = backbeatRoutes[request.method][request.resourceType]; if (!['GET', 'POST'].includes(request.method)) { - responseJSONBody(errors.MethodNotAllowed, null, response, log); - return callback(errors.MethodNotAllowed); + return respondToRequest(errors.MethodNotAllowed, response, log, callback); } if (request.method === 'GET') { - return route(request, response, userInfo, log, err => { - if (err) { - responseJSONBody(err, null, response, log); - } - return callback(err); - }); + return route(request, response, userInfo, log, err => + respondToRequest(err, response, log, callback)); } const op = request.query.operation; if (!op || typeof route[op] !== 'function') { log.error('Invalid operataion parameter', { operation: op }); - responseJSONBody(errors.BadRequest, null, response, log); - return callback(errors.BadRequest); + return respondToRequest(errors.BadRequest, response, log, callback); } return _getRequestPayload(request, (err, payload) => { if (err) { - responseJSONBody(err, null, response, log); - return callback(err); + return respondToRequest(err, response, log, callback); } let parsedIndex; @@ -1344,15 +1349,53 @@ function routeIndexingAPIs(request, response, userInfo, log, callback) { parsedIndex = joi.attempt(JSON.parse(payload), indexingSchema, 'invalid payload'); } catch (err) { log.error('Unable to parse index request body', { error: err }); - responseJSONBody(errors.BadRequest, null, response, log); - return callback(errors.BadRequest); + return respondToRequest(errors.BadRequest, response, log, callback); } - return route[op](parsedIndex, request, response, userInfo, log, err => { + return route[op](parsedIndex, request, response, userInfo, log, err => + respondToRequest(err, response, log, callback)); + }); +} + +function routeBackbeatAPIProxy(request, response, requestContexts, apiMethods, log, callback) { + const path = request.url.replace('/_/backbeat/api', '/_/'); + const 
{ host, port } = config.backbeat; + const target = `http://${host}:${port}${path}`; + + return async.waterfall([ + next => auth.server.doAuth(request, log, (err, userInfo, authorizationResults) => { if (err) { - responseJSONBody(err, null, response, log); + log.debug('authentication error', { + error: err, + method: request.method, + bucketName: request.bucketName, + objectKey: request.objectKey, + }); } - return callback(err); + return next(err, userInfo, authorizationResults); + }, 's3', requestContexts), + (userInfo, authorizationResults, next) => handleAuthorizationResults( + authorizationResults, apiMethods[0], log, err => next(err, userInfo)), + ], (err, userInfo) => { + if (err) { + return respondToRequest(err, response, log, callback); + } + // FIXME for now, any authenticated user can access API + // routes. We should introduce admin accounts or accounts + // with admin privileges, and restrict access to those + // only. + if (userInfo.getCanonicalID() === constants.publicId) { + log.debug('unauthenticated access to API routes', { + method: request.method, + bucketName: request.bucketName, + objectKey: request.objectKey, + }); + return respondToRequest(errors.AccessDenied, response, log, callback); + } + return backbeatProxy.web(request, response, { target }, err => { + log.error('error proxying request to api server', + { error: err.message }); + return respondToRequest(errors.ServiceUnavailable, response, log, callback); }); }); } @@ -1397,66 +1440,9 @@ function routeBackbeat(clientIP, request, response, log, callback) { log.debug('unable to proxy backbeat api request', { backbeatConfig: config.backbeat, }); - responseJSONBody(errors.MethodNotAllowed, null, response, log); - return callback(errors.MethodNotAllowed); + return respondToRequest(errors.MethodNotAllowed, response, log, callback); } - const path = request.url.replace('/_/backbeat/api', '/_/'); - const { host, port } = config.backbeat; - const target = `http://${host}:${port}${path}`; - - // TODO CLDSRV-591: shall we use the authorization results here? - return auth.server.doAuth(request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => { - if (err) { - log.debug('authentication error', { - error: err, - method: request.method, - bucketName: request.bucketName, - objectKey: request.objectKey, - }); - responseJSONBody(err, null, response, log); - return callback(err); - } - // eslint-disable-next-line no-param-reassign - request.accountQuotas = infos?.accountQuota; - if (authorizationResults) { - const checkedResults = checkAuthResults(authorizationResults, apiMethods[0], log); - if (checkedResults instanceof Error) { - responseJSONBody(errors.AccessDenied, null, response, log); - return callback(errors.AccessDenied); - } - // eslint-disable-next-line no-param-reassign - request.actionImplicitDenies = checkedResults.isImplicitDeny; - } else { - // create an object of keys apiMethods with all values to false: - // for backward compatibility, all apiMethods are allowed by default - // thus it is explicitly allowed, so implicit deny is false - // eslint-disable-next-line no-param-reassign - request.actionImplicitDenies = apiMethods.reduce((acc, curr) => { - // eslint-disable-next-line no-param-reassign - acc[curr] = false; - return acc; - }, {}); - } - // FIXME for now, any authenticated user can access API - // routes. We should introduce admin accounts or accounts - // with admin privileges, and restrict access to those - // only. 
- if (userInfo.getCanonicalID() === constants.publicId) { - log.debug('unauthenticated access to API routes', { - method: request.method, - bucketName: request.bucketName, - objectKey: request.objectKey, - }); - responseJSONBody(errors.AccessDenied, null, response, log); - return callback(errors.AccessDenied); - } - return backbeatProxy.web(request, response, { target }, err => { - log.error('error proxying request to api server', - { error: err.message }); - responseJSONBody(errors.ServiceUnavailable, null, response, log); - return callback(errors.ServiceUnavailable); - }); - }, 's3', requestContexts); + return routeBackbeatAPIProxy(request, response, requestContexts, apiMethods, log, callback); } const useMultipleBackend = @@ -1485,67 +1471,49 @@ function routeBackbeat(clientIP, request, response, log, callback) { resourceType: request.resourceType, query: request.query, }); - responseJSONBody(errors.MethodNotAllowed, null, response, log); - return callback(errors.MethodNotAllowed); + return respondToRequest(errors.MethodNotAllowed, response, log, callback); } - return async.waterfall([next => auth.server.doAuth( - // TODO CLDSRV-591: shall we use the authorization results here? - request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => { - if (err) { - log.debug('authentication error', { - error: err, - method: request.method, - bucketName: request.bucketName, - objectKey: request.objectKey, - }); - } - // eslint-disable-next-line no-param-reassign - request.accountQuotas = infos?.accountQuota; - if (authorizationResults) { - const checkedResults = checkAuthResults(authorizationResults, apiMethods[0], log); - if (checkedResults instanceof Error) { - return callback(errors.MethodNotAllowed); + const isObjectRequest = _isObjectRequest(request); + + return async.waterfall([ + next => auth.server.doAuth( + request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => { + if (err) { + log.debug('authentication error', { + error: err, + method: request.method, + bucketName: request.bucketName, + objectKey: request.objectKey, + }); } // eslint-disable-next-line no-param-reassign - request.actionImplicitDenies = checkedResults.isImplicitDeny; - } else { - // create an object of keys apiMethods with all values to false: - // for backward compatibility, all apiMethods are allowed by default - // thus it is explicitly allowed, so implicit deny is false - // eslint-disable-next-line no-param-reassign - request.actionImplicitDenies = apiMethods.reduce((acc, curr) => { - // eslint-disable-next-line no-param-reassign - acc[curr] = false; - return acc; - }, {}); - } - return next(err, userInfo); - }, 's3', requestContexts), + request.accountQuotas = infos?.accountQuota; + return next(err, userInfo, authorizationResults); + }, 's3', requestContexts), + (userInfo, authorizationResults, next) => + handleAuthorizationResults(request, authorizationResults, apiMethods[0], {}, log, err => + next(err, userInfo)), (userInfo, next) => { // TODO: understand why non-object requests (batchdelete) were not authenticated - if (!_isObjectRequest(request)) { + if (!isObjectRequest) { if (userInfo.getCanonicalID() === constants.publicId) { log.debug(`unauthenticated access to backbeat ${request.resourceType} routes`, { method: request.method, bucketName: request.bucketName, objectKey: request.objectKey, }); - return responseJSONBody( - errors.AccessDenied, null, response, log); + return next(errors.AccessDenied); } if (request.resourceType === 'index') { - return 
routeIndexingAPIs(request, response, userInfo, log, callback); + return routeIndexingAPIs(request, response, userInfo, log, + err => next(err, null, null)); } const route = backbeatRoutes[request.method][request.resourceType]; - return route(request, response, userInfo, log, err => { - if (err) { - responseJSONBody(err, null, response, log); - } - return callback(err); - }); + return route(request, response, userInfo, log, + err => next(err, null, null)); } const decodedVidResult = decodeVersionId(request.query); @@ -1554,7 +1522,7 @@ function routeBackbeat(clientIP, request, response, log, callback) { versionId: request.query.versionId, error: decodedVidResult, }); - return responseJSONBody(errors.InvalidArgument, null, response, log); + return next(errors.InvalidArgument); } const versionId = decodedVidResult; if (useMultipleBackend) { @@ -1569,9 +1537,14 @@ function routeBackbeat(clientIP, request, response, log, callback) { requestType: request.apiMethods || 'ReplicateObject', request, }; - return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log, next); + return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log, + (err, bucketMd, objMd) => next(err, bucketMd, objMd)); }, (bucketInfo, objMd, next) => { + // Function was already called + if (!isObjectRequest) { + return next(); + } if (!useMultipleBackend) { return backbeatRoutes[request.method][request.resourceType]( request, response, bucketInfo, objMd, log, next); @@ -1583,25 +1556,30 @@ function routeBackbeat(clientIP, request, response, log, callback) { return backbeatRoutes[request.method][request.resourceType][request.query.operation]( request, response, log, next); }], - err => async.forEachLimit( - // Finalizer hooks are used in a quota context and ensure consistent - // metrics in case of API errors. No operation required if the API - // completed successfully. - request.finalizerHooks, - 5, - (hook, done) => hook(err, done), - () => { - if (err) { - responseJSONBody(err, null, response, log); - return callback(err); - } - log.debug('backbeat route response sent successfully', - { method: request.method, - bucketName: request.bucketName, - objectKey: request.objectKey }); - return callback(); - }, - )); + err => { + if (err) { + return async.forEachLimit( + // Finalizer hooks are used in a quota context and ensure consistent + // metrics in case of API errors. No operation required if the API + // completed successfully. 
+                request.finalizerHooks,
+                5,
+                (hook, done) => hook(err, done),
+                () => {
+                    if (err) {
+                        return respondToRequest(err, response, log, callback);
+                    }
+                    log.debug('backbeat route response sent successfully', {
+                        method: request.method,
+                        bucketName: request.bucketName,
+                        objectKey: request.objectKey
+                    });
+                    return respondToRequest(null, response, log, callback);
+                },
+            );
+        }
+        return respondToRequest(null, response, log, callback);
+    });
 }
diff --git a/tests/unit/routeBackbeat.js b/tests/unit/routeBackbeat.js
index 42e57c6909..8dc881f258 100644
--- a/tests/unit/routeBackbeat.js
+++ b/tests/unit/routeBackbeat.js
@@ -23,8 +23,6 @@ const testBucket = {
     namespace,
     headers: {
         'host': `${bucketName}.s3.amazonaws.com`,
-        // set versioning
-
     },
     url: `/${bucketName}`,
     actionImplicitDenies: false,
@@ -47,7 +45,6 @@ describe('routeBackbeat', () => {
     let response;
 
     beforeEach(() => {
-        // Mock backbeatRoutes
        sinon.stub(backbeatRoutes, 'PUT').returns({
             data: sinon.stub(),
             metadata: sinon.stub(),
@@ -86,7 +83,6 @@ describe('routeBackbeat', () => {
             index: sinon.stub(),
         });
 
-        // Mock request and response
         request = new DummyRequest(
             {
                 method: 'GET',
@@ -131,7 +127,6 @@ describe('routeBackbeat', () => {
             target: `${bucketName}/${objectName}`,
             operation: null,
             versionId: false,
-            // not sending any body here, so expect error
             expect: errors.MalformedPOSTRequest,
         },
         {

From 9481e78d548e8a4da3e7b67e72a761180a9b2c88 Mon Sep 17 00:00:00 2001
From: williamlardier
Date: Thu, 12 Dec 2024 12:09:37 +0100
Subject: [PATCH 04/10] Ensure coherence of callback parameters

Issue: CLDSRV-591
---
 lib/routes/routeBackbeat.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/routes/routeBackbeat.js b/lib/routes/routeBackbeat.js
index 462d75425a..125fd8059c 100644
--- a/lib/routes/routeBackbeat.js
+++ b/lib/routes/routeBackbeat.js
@@ -1375,7 +1375,7 @@ function routeBackbeatAPIProxy(request, response, requestContexts, apiMethods, l
             return next(err, userInfo, authorizationResults);
         }, 's3', requestContexts),
         (userInfo, authorizationResults, next) => handleAuthorizationResults(
-            authorizationResults, apiMethods[0], log, err => next(err, userInfo)),
+            request, authorizationResults, apiMethods[0], undefined, log, err => next(err, userInfo)),

From d8cd2e4e36d92f84baefab85821d91cb4a42d622 Mon Sep 17 00:00:00 2001
From: williamlardier
Date: Thu, 12 Dec 2024 12:13:29 +0100
Subject: [PATCH 05/10] Re-enable route backbeat tests

- Update old ones
- Fix some tests that were not compatible with existing code, after
  confirming the changes were expected
- Do not run the tests against a real Azure/AWS backend in some cases,
  because the backends are either not healthy or the tests do not work.
  To be done separately.
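For reference, the Azure property checks in these tests now follow the
@azure/storage-blob v12 chaining shown in the diff below (sketch;
azureClient is assumed to be a BlobServiceClient):

    azureClient.getContainerClient(container)
        .getBlobClient(key)
        .getProperties()
        .then(properties => { /* assert on properties */ });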
Issue: CLDSRV-591 --- .../aws-node-sdk/lib/utility/bucket-util.js | 2 - .../multipleBackend/delete/deleteAzure.js | 16 +- .../multipleBackend/mpuAbort/azureAbortMPU.js | 2 +- tests/multipleBackend/routes/routeBackbeat.js | 2111 +++++++++-------- 4 files changed, 1074 insertions(+), 1057 deletions(-) diff --git a/tests/functional/aws-node-sdk/lib/utility/bucket-util.js b/tests/functional/aws-node-sdk/lib/utility/bucket-util.js index eede136437..c5ae7952a7 100644 --- a/tests/functional/aws-node-sdk/lib/utility/bucket-util.js +++ b/tests/functional/aws-node-sdk/lib/utility/bucket-util.js @@ -1,6 +1,4 @@ const bluebird = require('bluebird'); -const AWS = require('aws-sdk'); -AWS.config.logger = console; const { S3 } = require('aws-sdk'); const projectFixture = require('../fixtures/project'); const getConfig = require('../../test/support/config'); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAzure.js b/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAzure.js index 22e9d150fb..2fdcb7ffbf 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAzure.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAzure.js @@ -79,7 +79,7 @@ function testSuite() { assert.equal(err, null, 'Expected success ' + `but got error ${err}`); setTimeout(() => azureClient.getContainerClient(azureContainerName) - .getProperties(keyName) + .getBlobClient(keyName).getProperties() .then(() => assert.fail('Expected error'), err => { assert.strictEqual(err.statusCode, 404); assert.strictEqual(err.code, 'NotFound'); @@ -112,13 +112,13 @@ function testSuite() { assert.equal(err, null, 'Expected success ' + `but got error ${err}`); setTimeout(() => - azureClient.getContainerClient(azureContainerName) - .getProperties(`${azureContainerName}/${this.test.azureObject}`) - .then(() => assert.fail('Expected error'), err => { - assert.strictEqual(err.statusCode, 404); - assert.strictEqual(err.code, 'NotFound'); - return done(); - }), azureTimeout); + azureClient.getContainerClient(azureContainerName) + .getBlobClient(`${azureContainerName}/${this.test.azureObject}`).getProperties() + .then(() => assert.fail('Expected error'), err => { + assert.strictEqual(err.statusCode, 404); + assert.strictEqual(err.code, 'NotFound'); + return done(); + }), azureTimeout); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/azureAbortMPU.js b/tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/azureAbortMPU.js index 92c5e4c1d5..5519682ca0 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/azureAbortMPU.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/azureAbortMPU.js @@ -18,7 +18,7 @@ let bucketUtil; let s3; function azureCheck(container, key, expected, cb) { - azureClient.getContainerClient(container).getProperties(key).then(res => { + azureClient.getContainerClient(container).getBlobClient(key).getProperties().then(res => { assert.ok(!expected.error); const convertedMD5 = convertMD5(res.contentSettings.contentMD5); assert.strictEqual(convertedMD5, expectedMD5); diff --git a/tests/multipleBackend/routes/routeBackbeat.js b/tests/multipleBackend/routes/routeBackbeat.js index 1302b548b5..59ac74d48f 100644 --- a/tests/multipleBackend/routes/routeBackbeat.js +++ b/tests/multipleBackend/routes/routeBackbeat.js @@ -9,7 +9,7 @@ const versionIdUtils = versioning.VersionID; const { makeid } = require('../../unit/helpers'); const { makeRequest, makeBackbeatRequest } = 
require('../../functional/raw-node/utils/makeRequest'); const BucketUtility = - require('../../functional/aws-node-sdk/lib/utility/bucket-util'); + require('../../functional/aws-node-sdk/lib/utility/bucket-util'); const { describeSkipIfNotMultipleOrCeph, itSkipCeph, @@ -19,7 +19,7 @@ const { getAzureClient, } = require('../../functional/aws-node-sdk/test/multipleBackend/utils'); const { getRealAwsConfig } = - require('../../functional/aws-node-sdk/test/support/awsConfig'); + require('../../functional/aws-node-sdk/test/support/awsConfig'); const { getCredentials } = require('../../functional/aws-node-sdk/test/support/credentials'); const { config } = require('../../../lib/Config'); @@ -48,14 +48,14 @@ const testKey = 'testkey'; const testKeyUTF8 = '䆩鈁櫨㟔罳'; const testData = 'testkey data'; const testDataMd5 = crypto.createHash('md5') - .update(testData, 'utf-8') - .digest('hex'); + .update(testData, 'utf-8') + .digest('hex'); const emptyContentsMd5 = 'd41d8cd98f00b204e9800998ecf8427e'; const testMd = { 'md-model-version': 2, 'owner-display-name': 'Bart', 'owner-id': ('79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be'), + 'fd6e09d98eacf8f8d5218e7cd47ef2be'), 'last-modified': '2017-05-15T20:32:40.032Z', 'content-length': testData.length, 'content-md5': testDataMd5, @@ -87,7 +87,7 @@ const testMd = { const nonVersionedTestMd = { 'owner-display-name': 'Bart', 'owner-id': ('79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be'), + 'fd6e09d98eacf8f8d5218e7cd47ef2be'), 'content-length': testData.length, 'content-md5': testDataMd5, 'x-amz-version-id': 'null', @@ -212,12 +212,12 @@ function getMetadataToPut(putDataResponse) { // Reproduce what backbeat does to update target metadata mdToPut.location = JSON.parse(putDataResponse.body); ['x-amz-server-side-encryption', - 'x-amz-server-side-encryption-aws-kms-key-id', - 'x-amz-server-side-encryption-customer-algorithm'].forEach(headerName => { - if (putDataResponse.headers[headerName]) { - mdToPut[headerName] = putDataResponse.headers[headerName]; - } - }); + 'x-amz-server-side-encryption-aws-kms-key-id', + 'x-amz-server-side-encryption-customer-algorithm'].forEach(headerName => { + if (putDataResponse.headers[headerName]) { + mdToPut[headerName] = putDataResponse.headers[headerName]; + } + }); return mdToPut; } @@ -270,7 +270,7 @@ describe('backbeat routes', () => { .then(() => s3.deleteBucket({ Bucket: TEST_ENCRYPTED_BUCKET }).promise()) .then(() => bucketUtil.empty(NONVERSIONED_BUCKET)) .then(() => s3.deleteBucket({ Bucket: NONVERSIONED_BUCKET }).promise()) - .then(() => done(), err => done(err)) + .then(() => done(), () => done()) ); describe('null version', () => { @@ -389,13 +389,13 @@ describe('backbeat routes', () => { enableVersioning: next => s3.putBucketVersioning( { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, next), putObjectAgain: next => s3.putObject( - { Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); - } - expectedVersionId = data.VersionId; - return next(); - }), + { Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + expectedVersionId = data.VersionId; + return next(); + }), getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -873,7 +873,7 @@ describe('backbeat routes', () => { // give some time for the async deletes to complete return setTimeout(() => checkVersionData(s3, bucket, keyName, expectedVersionId, 
testData, done), - 1000); + 1000); }); }); @@ -998,138 +998,138 @@ describe('backbeat routes', () => { }); it('should update null version if versioning suspended and null version has a version id and' + - 'put object afterward', done => { - let objMD; - return async.series([ - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { + 'put object afterward', done => { + let objMD; + return async.series([ + next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => makeBackbeatRequest({ + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + }), + next => makeBackbeatRequest({ + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, next), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], (err, data) => { if (err) { - return next(err); - } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); + return done(err); } - objMD = result; - return next(); - }), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } - const headObjectRes = data[5]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert(!headObjectRes.StorageClass); + const headObjectRes = data[5]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert(!headObjectRes.StorageClass); - const listObjectVersionsRes = data[6]; - const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); - assert.strictEqual(Versions.length, 1); + const listObjectVersionsRes = data[6]; + const { DeleteMarkers, Versions } = listObjectVersionsRes; + assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(Versions.length, 1); - const currentVersion = Versions[0]; - assert(currentVersion.IsLatest); - assertVersionHasNotBeenUpdated(currentVersion, 'null'); - return done(); + const currentVersion = Versions[0]; + assert(currentVersion.IsLatest); + assertVersionHasNotBeenUpdated(currentVersion, 'null'); + return done(); + }); }); - }); it('should update null 
version if versioning suspended and null version has a version id and' + - 'put version afterward', done => { - let objMD; - let expectedVersionId; - return async.series([ - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { - if (err) { - return next(err); - } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); - } - objMD = result; - return next(); - }), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + 'put version afterward', done => { + let objMD; + let expectedVersionId; + return async.series([ + next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => makeBackbeatRequest({ + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + }), + next => makeBackbeatRequest({ + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, next), + next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, + next), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + expectedVersionId = data.VersionId; + return next(); + }), + next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], (err, data) => { if (err) { - return next(err); + return done(err); } - expectedVersionId = data.VersionId; - return next(); - }), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } - const headObjectRes = data[6]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert.strictEqual(headObjectRes.StorageClass, storageClass); + const headObjectRes = data[6]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert.strictEqual(headObjectRes.StorageClass, storageClass); - const listObjectVersionsRes = data[7]; - const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 2); + const listObjectVersionsRes = data[7]; + const { Versions } = listObjectVersionsRes; + assert.strictEqual(Versions.length, 2); - const [currentVersion] = Versions.filter(v => 
v.IsLatest); - assertVersionHasNotBeenUpdated(currentVersion, expectedVersionId); + const [currentVersion] = Versions.filter(v => v.IsLatest); + assertVersionHasNotBeenUpdated(currentVersion, expectedVersionId); - const [nonCurrentVersion] = Versions.filter(v => !v.IsLatest); - assertVersionIsNullAndUpdated(nonCurrentVersion); - return done(); + const [nonCurrentVersion] = Versions.filter(v => !v.IsLatest); + assertVersionIsNullAndUpdated(nonCurrentVersion); + return done(); + }); }); - }); it('should update non-current null version if versioning suspended', done => { let expectedVersionId; @@ -1277,78 +1277,78 @@ describe('backbeat routes', () => { }); it('should update current null version if versioning suspended and put a null version ' + - 'afterwards', done => { - let objMD; - let deletedVersionId; - return async.series([ - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); - } - deletedVersionId = data.VersionId; - return next(); - }), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: deletedVersionId }, next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { + 'afterwards', done => { + let objMD; + let deletedVersionId; + return async.series([ + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, + next), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + deletedVersionId = data.VersionId; + return next(); + }), + next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next), + next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: deletedVersionId }, next), + next => makeBackbeatRequest({ + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + }), + next => makeBackbeatRequest({ + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, next), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], (err, data) => { if (err) { - return next(err); - } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); + return done(err); } - objMD = result; - return next(); - }), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: 
keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } - const headObjectRes = data[8]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert(!headObjectRes.StorageClass); + const headObjectRes = data[8]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert(!headObjectRes.StorageClass); - const listObjectVersionsRes = data[9]; - const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); - assert.strictEqual(Versions.length, 1); + const listObjectVersionsRes = data[9]; + const { DeleteMarkers, Versions } = listObjectVersionsRes; + assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(Versions.length, 1); - const currentVersion = Versions[0]; - assert(currentVersion.IsLatest); - assertVersionHasNotBeenUpdated(currentVersion, 'null'); + const currentVersion = Versions[0]; + assert(currentVersion.IsLatest); + assertVersionHasNotBeenUpdated(currentVersion, 'null'); - return done(); + return done(); + }); }); - }); it('should update current null version if versioning suspended and put a version afterwards', done => { let objMD; @@ -1436,9 +1436,8 @@ describe('backbeat routes', () => { }); // TODO: CLDSRV-394 unskip routeBackbeat tests - describe.skip('backbeat PUT routes', () => { - describe('PUT data + metadata should create a new complete object', - () => { + describe('backbeat PUT routes', () => { + describe('PUT data + metadata should create a new complete object', () => { [{ caption: 'with ascii test key', key: testKey, encodedKey: testKey, @@ -1483,52 +1482,53 @@ describe('backbeat routes', () => { key, encodedKey: encodeURI(key), caption: `with key ${key}`, }))) - .forEach(testCase => { - it(testCase.caption, done => { - async.waterfall([next => { - const queryObj = testCase.legacyAPI ? {} : { v2: '' }; - makeBackbeatRequest({ - method: 'PUT', bucket: testCase.encryption ? - TEST_ENCRYPTED_BUCKET : TEST_BUCKET, - objectKey: testCase.encodedKey, - resourceType: 'data', - queryObj, - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - const newMd = getMetadataToPut(response); - if (testCase.encryption && !testCase.legacyAPI) { - assert.strictEqual(typeof newMd.location[0].cryptoScheme, 'number'); - assert.strictEqual(typeof newMd.location[0].cipheredDataKey, 'string'); - } else { - // if no encryption or legacy API, data should not be encrypted - assert.strictEqual(newMd.location[0].cryptoScheme, undefined); - assert.strictEqual(newMd.location[0].cipheredDataKey, undefined); - } - makeBackbeatRequest({ - method: 'PUT', bucket: testCase.encryption ? - TEST_ENCRYPTED_BUCKET : TEST_BUCKET, - objectKey: testCase.encodedKey, - resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - checkObjectData( - s3, testCase.encryption ? 
TEST_ENCRYPTED_BUCKET : TEST_BUCKET, - testCase.key, testData, next); - }], err => { - assert.ifError(err); - done(); + .forEach(testCase => { + it(testCase.caption, done => { + async.waterfall([next => { + const queryObj = testCase.legacyAPI ? {} : { v2: '' }; + makeBackbeatRequest({ + method: 'PUT', bucket: testCase.encryption ? + TEST_ENCRYPTED_BUCKET : TEST_BUCKET, + objectKey: testCase.encodedKey, + resourceType: 'data', + queryObj, + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + const newMd = getMetadataToPut(response); + if (testCase.encryption && !testCase.legacyAPI) { + assert.strictEqual(typeof newMd.location[0].cryptoScheme, 'number'); + assert.strictEqual(typeof newMd.location[0].cipheredDataKey, 'string'); + } else { + // if no encryption or legacy API, data should not be encrypted + assert.strictEqual(newMd.location[0].cryptoScheme, undefined); + assert.strictEqual(newMd.location[0].cipheredDataKey, undefined); + } + makeBackbeatRequest({ + method: 'PUT', bucket: testCase.encryption ? + TEST_ENCRYPTED_BUCKET : TEST_BUCKET, + objectKey: testCase.encodedKey, + resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + checkObjectData( + s3, testCase.encryption ? TEST_ENCRYPTED_BUCKET : TEST_BUCKET, + testCase.key, testData, next); + }], err => { + assert.ifError(err); + done(); + }); }); }); - }); }); it('should PUT metadata for a non-versioned bucket', done => { @@ -1583,60 +1583,60 @@ describe('backbeat routes', () => { }); it('PUT metadata with "x-scal-replication-content: METADATA"' + - 'header should replicate metadata only', done => { - async.waterfall([next => { - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_ENCRYPTED_BUCKET, - objectKey: 'test-updatemd-key', - resourceType: 'data', - queryObj: { v2: '' }, - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData, - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - const newMd = getMetadataToPut(response); - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_ENCRYPTED_BUCKET, - objectKey: 'test-updatemd-key', - resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // Don't update the sent metadata since it is sent by - // backbeat as received from the replication queue, - // without updated data location or encryption info - // (since that info is not known by backbeat) - const newMd = Object.assign({}, testMd); - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_ENCRYPTED_BUCKET, - objectKey: 'test-updatemd-key', - resourceType: 'metadata', - headers: { 'x-scal-replication-content': 'METADATA' }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - checkObjectData(s3, TEST_ENCRYPTED_BUCKET, 'test-updatemd-key', - testData, next); - }], err => { - assert.ifError(err); - done(); + 'header should replicate metadata only', done => { + async.waterfall([next => { + makeBackbeatRequest({ + method: 
'PUT', bucket: TEST_ENCRYPTED_BUCKET, + objectKey: 'test-updatemd-key', + resourceType: 'data', + queryObj: { v2: '' }, + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData, + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + const newMd = getMetadataToPut(response); + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_ENCRYPTED_BUCKET, + objectKey: 'test-updatemd-key', + resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // Don't update the sent metadata since it is sent by + // backbeat as received from the replication queue, + // without updated data location or encryption info + // (since that info is not known by backbeat) + const newMd = Object.assign({}, testMd); + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_ENCRYPTED_BUCKET, + objectKey: 'test-updatemd-key', + resourceType: 'metadata', + headers: { 'x-scal-replication-content': 'METADATA' }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + checkObjectData(s3, TEST_ENCRYPTED_BUCKET, 'test-updatemd-key', + testData, next); + }], err => { + assert.ifError(err); + done(); + }); }); - }); it('should PUT tags for a non-versioned bucket', function test(done) { this.timeout(10000); const bucket = NONVERSIONED_BUCKET; const awsBucket = - config.locationConstraints[awsLocation].details.bucketName; + config.locationConstraints[awsLocation].details.bucketName; const awsKey = uuidv4(); async.waterfall([ next => @@ -1676,280 +1676,287 @@ describe('backbeat routes', () => { }); it('should refuse PUT data if no x-scal-canonical-id header ' + - 'is provided', done => makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, resourceType: 'data', - queryObj: { v2: '' }, - headers: { - 'content-length': testData.length, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData, - }, - err => { - assert.strictEqual(err.code, 'BadRequest'); - done(); - })); + 'is provided', done => makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, resourceType: 'data', + queryObj: { v2: '' }, + headers: { + 'content-length': testData.length, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData, + }, + err => { + assert.strictEqual(err.code, 'BadRequest'); + done(); + })); it('should refuse PUT in metadata-only mode if object does not exist', - done => { - async.waterfall([next => { - const newMd = Object.assign({}, testMd); - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: 'does-not-exist', - resourceType: 'metadata', - headers: { 'x-scal-replication-content': 'METADATA' }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }], err => { - assert.strictEqual(err.statusCode, 404); - done(); + done => { + async.waterfall([next => { + const newMd = Object.assign({}, testMd); + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: 'does-not-exist', + resourceType: 'metadata', + headers: { 'x-scal-replication-content': 'METADATA' }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, next); + }], err => { + assert.strictEqual(err.statusCode, 404); + 
done(); + }); }); - }); it('should remove old object data locations if version is overwritten ' + - 'with same contents', done => { - let oldLocation; - const testKeyOldData = `${testKey}-old-data`; - async.waterfall([next => { - // put object's data locations - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'data', - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // put object metadata - const newMd = Object.assign({}, testMd); - newMd.location = JSON.parse(response.body); - oldLocation = newMd.location; - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), - }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // put another object which metadata reference the - // same data locations, we will attempt to retrieve - // this object at the end of the test to confirm that - // its locations have been deleted - const oldDataMd = Object.assign({}, testMd); - oldDataMd.location = oldLocation; - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKeyOldData, - resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(oldDataMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // create new data locations - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'data', - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // overwrite the original object version, now - // with references to the new data locations - const newMd = Object.assign({}, testMd); - newMd.location = JSON.parse(response.body); - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), - }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // give some time for the async deletes to complete - setTimeout(() => checkObjectData(s3, TEST_BUCKET, testKey, testData, next), - 1000); - }, next => { - // check that the object copy referencing the old data - // locations is unreadable, confirming that the old - // data locations have been deleted - s3.getObject({ - Bucket: TEST_BUCKET, - Key: testKeyOldData, - }, err => { - assert(err, 'expected error to get object with old data ' + - 'locations, got success'); - next(); + 'with same contents', done => { + let oldLocation; + const testKeyOldData = `${testKey}-old-data`; + async.waterfall([next => { + // put object's data locations + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'data', + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData + }, next); + }, (response, next) => { 
+ assert.strictEqual(response.statusCode, 200); + // put object metadata + const newMd = Object.assign({}, testMd); + newMd.location = JSON.parse(response.body); + oldLocation = newMd.location; + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // put another object which metadata reference the + // same data locations, we will attempt to retrieve + // this object at the end of the test to confirm that + // its locations have been deleted + const oldDataMd = Object.assign({}, testMd); + oldDataMd.location = oldLocation; + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKeyOldData, + resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(oldDataMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // create new data locations + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'data', + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // overwrite the original object version, now + // with references to the new data locations + const newMd = Object.assign({}, testMd); + newMd.location = JSON.parse(response.body); + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // give some time for the async deletes to complete + setTimeout(() => checkObjectData(s3, TEST_BUCKET, testKey, testData, next), + 1000); + }, next => { + // check that the object copy referencing the old data + // locations is unreadable, confirming that the old + // data locations have been deleted + s3.getObject({ + Bucket: TEST_BUCKET, + Key: testKeyOldData, + }, err => { + assert(err, 'expected error to get object with old data ' + + 'locations, got success'); + next(); + }); + }], err => { + assert.ifError(err); + done(); }); - }], err => { - assert.ifError(err); - done(); }); - }); - it('should remove old object data locations if version is overwritten ' + - 'with empty contents', done => { - let oldLocation; - const testKeyOldData = `${testKey}-old-data`; - async.waterfall([next => { - // put object's data locations - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'data', - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // put object metadata - const newMd = Object.assign({}, testMd); - newMd.location = JSON.parse(response.body); - oldLocation = newMd.location; - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: 
versionIdUtils.encode(testMd.versionId), - }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // put another object which metadata reference the - // same data locations, we will attempt to retrieve - // this object at the end of the test to confirm that - // its locations have been deleted - const oldDataMd = Object.assign({}, testMd); - oldDataMd.location = oldLocation; - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKeyOldData, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), - }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(oldDataMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // overwrite the original object version with an empty location - const newMd = Object.assign({}, testMd); - newMd['content-length'] = 0; - newMd['content-md5'] = emptyContentsMd5; - newMd.location = null; - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), - }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // give some time for the async deletes to complete - setTimeout(() => checkObjectData(s3, TEST_BUCKET, testKey, '', next), - 1000); - }, next => { - // check that the object copy referencing the old data - // locations is unreadable, confirming that the old - // data locations have been deleted - s3.getObject({ - Bucket: TEST_BUCKET, - Key: testKeyOldData, - }, err => { - assert(err, 'expected error to get object with old data ' + - 'locations, got success'); - next(); + // TODO: CLDSRV-394 unskip or delete this test + // The new data location is set to null when archiving to a Cold site. + // In that case "removing old data location key" is handled by the lifecycle + // transition processor. 
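+            //
+            // Illustrative sketch only (not an executed test): the archive case
+            // boils down to a metadata-only overwrite of the same version that
+            // carries a null data location. With the helpers already used in
+            // this file (`callback` being a placeholder), it would look like:
+            //
+            //     makeBackbeatRequest({
+            //         method: 'PUT', bucket: TEST_BUCKET,
+            //         objectKey: testKey, resourceType: 'metadata',
+            //         queryObj: {
+            //             versionId: versionIdUtils.encode(testMd.versionId),
+            //         },
+            //         authCredentials: backbeatAuthCredentials,
+            //         requestBody: JSON.stringify(Object.assign({}, testMd, {
+            //             'content-length': 0,
+            //             'content-md5': emptyContentsMd5,
+            //             location: null,
+            //         })),
+            //     }, callback);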
+ it.skip('should remove old object data locations if version is overwritten ' + + 'with empty contents', done => { + let oldLocation; + const testKeyOldData = `${testKey}-old-data`; + async.waterfall([next => { + // put object's data locations + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'data', + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // put object metadata + const newMd = Object.assign({}, testMd); + newMd.location = JSON.parse(response.body); + oldLocation = newMd.location; + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // put another object which metadata reference the + // same data locations, we will attempt to retrieve + // this object at the end of the test to confirm that + // its locations have been deleted + const oldDataMd = Object.assign({}, testMd); + oldDataMd.location = oldLocation; + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKeyOldData, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(oldDataMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // overwrite the original object version with an empty location + const newMd = Object.assign({}, testMd); + newMd['content-length'] = 0; + newMd['content-md5'] = emptyContentsMd5; + newMd.location = null; + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // give some time for the async deletes to complete + setTimeout(() => checkObjectData(s3, TEST_BUCKET, testKey, '', next), + 1000); + }, next => { + // check that the object copy referencing the old data + // locations is unreadable, confirming that the old + // data locations have been deleted + s3.getObject({ + Bucket: TEST_BUCKET, + Key: testKeyOldData, + }, err => { + assert(err, 'expected error to get object with old data ' + + 'locations, got success'); + next(); + }); + }], err => { + assert.ifError(err); + done(); }); - }], err => { - assert.ifError(err); - done(); }); - }); it('should not remove data locations on replayed metadata PUT', - done => { - let serializedNewMd; - async.waterfall([next => { - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'data', - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - const newMd = Object.assign({}, testMd); - newMd.location = JSON.parse(response.body); - serializedNewMd = JSON.stringify(newMd); - async.timesSeries(2, (i, putDone) => 
makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), - }, - authCredentials: backbeatAuthCredentials, - requestBody: serializedNewMd, - }, (err, response) => { - assert.ifError(err); + done => { + let serializedNewMd; + async.waterfall([next => { + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'data', + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData + }, next); + }, (response, next) => { assert.strictEqual(response.statusCode, 200); - putDone(err); - }), () => next()); - }, next => { - // check that the object is still readable to make - // sure we did not remove the data keys - s3.getObject({ - Bucket: TEST_BUCKET, - Key: testKey, - }, (err, data) => { + const newMd = Object.assign({}, testMd); + newMd.location = JSON.parse(response.body); + serializedNewMd = JSON.stringify(newMd); + async.timesSeries(2, (i, putDone) => makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: serializedNewMd, + }, (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + putDone(err); + }), () => next()); + }, next => { + // check that the object is still readable to make + // sure we did not remove the data keys + s3.getObject({ + Bucket: TEST_BUCKET, + Key: testKey, + }, (err, data) => { + assert.ifError(err); + assert.strictEqual(data.Body.toString(), testData); + next(); + }); + }], err => { assert.ifError(err); - assert.strictEqual(data.Body.toString(), testData); - next(); + done(); }); - }], err => { - assert.ifError(err); - done(); }); - }); it('should create a new version when no versionId is passed in query string', done => { - let newVersion; async.waterfall([next => { // put object's data locations makeBackbeatRequest({ @@ -1961,7 +1968,8 @@ describe('backbeat routes', () => { 'x-scal-canonical-id': testArn, }, authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); + requestBody: testData + }, next); }, (response, next) => { assert.strictEqual(response.statusCode, 200); // put object metadata @@ -1991,7 +1999,8 @@ describe('backbeat routes', () => { 'x-scal-canonical-id': testArn, }, authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); + requestBody: testData + }, next); }, (response, next) => { assert.strictEqual(response.statusCode, 200); // create a new version with the new data locations, @@ -2007,9 +2016,8 @@ describe('backbeat routes', () => { }, next); }, (response, next) => { assert.strictEqual(response.statusCode, 200); - const parsedResponse = JSON.parse(response.body); - newVersion = parsedResponse.versionId; - assert.notStrictEqual(newVersion, testMd.versionId); + // when no version id is provided, we return nothing + assert.strictEqual(response.body.length, 0); // give some time for the async deletes to complete, // then check that we can read the latest version setTimeout(() => s3.getObject({ @@ -2037,26 +2045,27 @@ describe('backbeat routes', () => { }); }); }); - describe.skip('backbeat authorization checks', () => { + + describe('backbeat authorization checks', () => { [{ method: 'PUT', resourceType: 'metadata' }, - { method: 'PUT', 
resourceType: 'data' }].forEach(test => { - const queryObj = test.resourceType === 'data' ? { v2: '' } : {}; - it(`${test.method} ${test.resourceType} should respond with ` + - '403 Forbidden if no credentials are provided', - done => { - makeBackbeatRequest({ - method: test.method, bucket: TEST_BUCKET, - objectKey: TEST_KEY, resourceType: test.resourceType, - queryObj, - }, - err => { - assert(err); - assert.strictEqual(err.statusCode, 403); - assert.strictEqual(err.code, 'AccessDenied'); - done(); - }); - }); - it(`${test.method} ${test.resourceType} should respond with ` + + { method: 'PUT', resourceType: 'data' }].forEach(test => { + const queryObj = test.resourceType === 'data' ? { v2: '' } : {}; + it(`${test.method} ${test.resourceType} should respond with ` + + '403 Forbidden if no credentials are provided', + done => { + makeBackbeatRequest({ + method: test.method, bucket: TEST_BUCKET, + objectKey: TEST_KEY, resourceType: test.resourceType, + queryObj, + }, + err => { + assert(err); + assert.strictEqual(err.statusCode, 403); + assert.strictEqual(err.code, 'AccessDenied'); + done(); + }); + }); + it(`${test.method} ${test.resourceType} should respond with ` + '403 Forbidden if wrong credentials are provided', done => { makeBackbeatRequest({ @@ -2068,14 +2077,14 @@ describe('backbeat routes', () => { secretKey: 'still wrong', }, }, - err => { - assert(err); - assert.strictEqual(err.statusCode, 403); - assert.strictEqual(err.code, 'InvalidAccessKeyId'); - done(); - }); + err => { + assert(err); + assert.strictEqual(err.statusCode, 403); + assert.strictEqual(err.code, 'InvalidAccessKeyId'); + done(); + }); }); - it(`${test.method} ${test.resourceType} should respond with ` + + it(`${test.method} ${test.resourceType} should respond with ` + '403 Forbidden if the account does not match the ' + 'backbeat user', done => { @@ -2088,14 +2097,14 @@ describe('backbeat routes', () => { secretKey: 'verySecretKey2', }, }, - err => { - assert(err); - assert.strictEqual(err.statusCode, 403); - assert.strictEqual(err.code, 'AccessDenied'); - done(); - }); + err => { + assert(err); + assert.strictEqual(err.statusCode, 403); + assert.strictEqual(err.code, 'AccessDenied'); + done(); + }); }); - it(`${test.method} ${test.resourceType} should respond with ` + + it(`${test.method} ${test.resourceType} should respond with ` + '403 Forbidden if backbeat user has wrong secret key', done => { makeBackbeatRequest({ @@ -2107,55 +2116,55 @@ describe('backbeat routes', () => { secretKey: 'hastalavista', }, }, - err => { - assert(err); - assert.strictEqual(err.statusCode, 403); - assert.strictEqual(err.code, 'SignatureDoesNotMatch'); - done(); - }); + err => { + assert(err); + assert.strictEqual(err.statusCode, 403); + assert.strictEqual(err.code, 'SignatureDoesNotMatch'); + done(); + }); }); - }); + }); it('GET /_/backbeat/api/... 
should respond with ' + - '503 on authenticated requests (API server down)', - done => { - const options = { - authCredentials: { - accessKey: 'accessKey2', - secretKey: 'verySecretKey2', - }, - hostname: ipAddress, - port: 8000, - method: 'GET', - path: '/_/backbeat/api/crr/failed', - jsonResponse: true, - }; - makeRequest(options, err => { - assert(err); - assert.strictEqual(err.statusCode, 503); - assert.strictEqual(err.code, 'ServiceUnavailable'); - done(); - }); - }); + '503 on authenticated requests (API server down)', + done => { + const options = { + authCredentials: { + accessKey: 'accessKey2', + secretKey: 'verySecretKey2', + }, + hostname: ipAddress, + port: 8000, + method: 'GET', + path: '/_/backbeat/api/crr/failed', + jsonResponse: true, + }; + makeRequest(options, err => { + assert(err); + assert.strictEqual(err.statusCode, 503); + assert.strictEqual(err.code, 'ServiceUnavailable'); + done(); + }); + }); it('GET /_/backbeat/api/... should respond with ' + - '403 Forbidden if the request is unauthenticated', - done => { - const options = { - hostname: ipAddress, - port: 8000, - method: 'GET', - path: '/_/backbeat/api/crr/failed', - jsonResponse: true, - }; - makeRequest(options, err => { - assert(err); - assert.strictEqual(err.statusCode, 403); - assert.strictEqual(err.code, 'AccessDenied'); - done(); - }); - }); + '403 Forbidden if the request is unauthenticated', + done => { + const options = { + hostname: ipAddress, + port: 8000, + method: 'GET', + path: '/_/backbeat/api/crr/failed', + jsonResponse: true, + }; + makeRequest(options, err => { + assert(err); + assert.strictEqual(err.statusCode, 403); + assert.strictEqual(err.code, 'AccessDenied'); + done(); + }); + }); }); - describe.skip('GET Metadata route', () => { + describe('GET Metadata route', () => { beforeEach(done => makeBackbeatRequest({ method: 'PUT', bucket: TEST_BUCKET, objectKey: TEST_KEY, @@ -2192,189 +2201,189 @@ describe('backbeat routes', () => { versionId: versionIdUtils.encode(testMd.versionId), }, }, (err, data) => { - assert.strictEqual(data.statusCode, 404); - assert.strictEqual(JSON.parse(data.body).code, 'NoSuchBucket'); - done(); - }); - }); - - it('should return error if object does not exist', done => { - makeBackbeatRequest({ - method: 'GET', bucket: TEST_BUCKET, - objectKey: 'blah', resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), - }, - }, (err, data) => { - assert.strictEqual(data.statusCode, 404); - assert.strictEqual(JSON.parse(data.body).code, 'ObjNotFound'); - done(); - }); - }); - }); - describe.skip('backbeat multipart upload operations', function test() { - this.timeout(10000); - - // The ceph image does not support putting tags during initiate MPU. 
- itSkipCeph('should put tags if the source is AWS and tags are ' + - 'provided when initiating the multipart upload', done => { - const awsBucket = - config.locationConstraints[awsLocation].details.bucketName; - const awsKey = uuidv4(); - const multipleBackendPath = - `/_/backbeat/multiplebackenddata/${awsBucket}/${awsKey}`; - let uploadId; - let partData; - async.series([ - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: multipleBackendPath, - queryObj: { operation: 'initiatempu' }, - headers: { - 'x-scal-storage-class': awsLocation, - 'x-scal-storage-type': 'aws_s3', - 'x-scal-tags': JSON.stringify({ 'key1': 'value1' }), - }, - jsonResponse: true, - }, (err, data) => { - if (err) { - return next(err); - } - uploadId = JSON.parse(data.body).uploadId; - return next(); - }), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'PUT', - path: multipleBackendPath, - queryObj: { operation: 'putpart' }, - headers: { - 'x-scal-storage-class': awsLocation, - 'x-scal-storage-type': 'aws_s3', - 'x-scal-upload-id': uploadId, - 'x-scal-part-number': '1', - 'content-length': testData.length, - }, - requestBody: testData, - jsonResponse: true, - }, (err, data) => { - if (err) { - return next(err); - } - const body = JSON.parse(data.body); - partData = [{ - PartNumber: [body.partNumber], - ETag: [body.ETag], - }]; - return next(); - }), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: multipleBackendPath, - queryObj: { operation: 'completempu' }, - headers: { - 'x-scal-storage-class': awsLocation, - 'x-scal-storage-type': 'aws_s3', - 'x-scal-upload-id': uploadId, - }, - requestBody: JSON.stringify(partData), - jsonResponse: true, - }, next), - next => - awsClient.getObjectTagging({ - Bucket: awsBucket, - Key: awsKey, - }, (err, data) => { - assert.ifError(err); - assert.deepStrictEqual(data.TagSet, [{ - Key: 'key1', - Value: 'value1', - }]); - next(); - }), - ], done); + assert.strictEqual(data.statusCode, 404); + assert.strictEqual(JSON.parse(data.body).code, 'NoSuchBucket'); + done(); + }); }); - it('should put tags if the source is Azure and tags are provided ' + - 'when completing the multipart upload', done => { - const containerName = getAzureContainerName(azureLocation); - const blob = uuidv4(); - const multipleBackendPath = - `/_/backbeat/multiplebackenddata/${containerName}/${blob}`; - const uploadId = uuidv4().replace(/-/g, ''); - let partData; - async.series([ - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'PUT', - path: multipleBackendPath, - queryObj: { operation: 'putpart' }, - headers: { - 'x-scal-storage-class': azureLocation, - 'x-scal-storage-type': 'azure', - 'x-scal-upload-id': uploadId, - 'x-scal-part-number': '1', - 'content-length': testData.length, - }, - requestBody: testData, - jsonResponse: true, - }, (err, data) => { - if (err) { - return next(err); - } - const body = JSON.parse(data.body); - partData = [{ - PartNumber: [body.partNumber], - ETag: [body.ETag], - NumberSubParts: [body.numberSubParts], - }]; - return next(); - }), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: multipleBackendPath, - queryObj: { operation: 'completempu' }, - headers: { - 'x-scal-storage-class': azureLocation, - 
'x-scal-storage-type': 'azure', - 'x-scal-upload-id': uploadId, - 'x-scal-tags': JSON.stringify({ 'key1': 'value1' }), - }, - requestBody: JSON.stringify(partData), - jsonResponse: true, - }, next), - next => - azureClient.getBlobProperties( - containerName, blob, (err, result) => { + + it('should return error if object does not exist', done => { + makeBackbeatRequest({ + method: 'GET', bucket: TEST_BUCKET, + objectKey: 'blah', resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + }, (err, data) => { + assert.strictEqual(data.statusCode, 404); + assert.strictEqual(JSON.parse(data.body).code, 'ObjNotFound'); + done(); + }); + }); + }); + + describe('backbeat multipart upload operations', function test() { + this.timeout(10000); + + // The ceph image does not support putting tags during initiate MPU. + itSkipCeph('should put tags if the source is AWS and tags are ' + + 'provided when initiating the multipart upload', done => { + const awsBucket = + config.locationConstraints[awsLocation].details.bucketName; + const awsKey = uuidv4(); + const multipleBackendPath = + `/_/backbeat/multiplebackenddata/${awsBucket}/${awsKey}`; + let uploadId; + let partData; + async.series([ + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: multipleBackendPath, + queryObj: { operation: 'initiatempu' }, + headers: { + 'x-scal-storage-class': awsLocation, + 'x-scal-storage-type': 'aws_s3', + 'x-scal-tags': JSON.stringify({ 'key1': 'value1' }), + }, + jsonResponse: true, + }, (err, data) => { if (err) { return next(err); } - const tags = JSON.parse(result.metadata.tags); - assert.deepStrictEqual(tags, { key1: 'value1' }); + uploadId = JSON.parse(data.body).uploadId; return next(); }), - ], done); - }); + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'PUT', + path: multipleBackendPath, + queryObj: { operation: 'putpart' }, + headers: { + 'x-scal-storage-class': awsLocation, + 'x-scal-storage-type': 'aws_s3', + 'x-scal-upload-id': uploadId, + 'x-scal-part-number': '1', + 'content-length': testData.length, + }, + requestBody: testData, + jsonResponse: true, + }, (err, data) => { + if (err) { + return next(err); + } + const body = JSON.parse(data.body); + partData = [{ + PartNumber: [body.partNumber], + ETag: [body.ETag], + }]; + return next(); + }), + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: multipleBackendPath, + queryObj: { operation: 'completempu' }, + headers: { + 'x-scal-storage-class': awsLocation, + 'x-scal-storage-type': 'aws_s3', + 'x-scal-upload-id': uploadId, + }, + requestBody: JSON.stringify(partData), + jsonResponse: true, + }, next), + next => + awsClient.getObjectTagging({ + Bucket: awsBucket, + Key: awsKey, + }, (err, data) => { + assert.ifError(err); + assert.deepStrictEqual(data.TagSet, [{ + Key: 'key1', + Value: 'value1', + }]); + next(); + }), + ], done); + }); + + it.skip('should put tags if the source is Azure and tags are provided ' + + 'when completing the multipart upload', done => { + const containerName = getAzureContainerName(azureLocation); + const blob = uuidv4(); + const multipleBackendPath = + `/_/backbeat/multiplebackenddata/${containerName}/${blob}`; + const uploadId = uuidv4().replace(/-/g, ''); + let partData; + async.series([ + next => + makeRequest({ + 
authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'PUT', + path: multipleBackendPath, + queryObj: { operation: 'putpart' }, + headers: { + 'x-scal-storage-class': azureLocation, + 'x-scal-storage-type': 'azure', + 'x-scal-upload-id': uploadId, + 'x-scal-part-number': '1', + 'content-length': testData.length, + }, + requestBody: testData, + jsonResponse: true, + }, (err, data) => { + if (err) { + return next(err); + } + const body = JSON.parse(data.body); + partData = [{ + PartNumber: [body.partNumber], + ETag: [body.ETag], + NumberSubParts: [body.numberSubParts], + }]; + return next(); + }), + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: multipleBackendPath, + queryObj: { operation: 'completempu' }, + headers: { + 'x-scal-storage-class': azureLocation, + 'x-scal-storage-type': 'azure', + 'x-scal-upload-id': uploadId, + 'x-scal-tags': JSON.stringify({ 'key1': 'value1' }), + }, + requestBody: JSON.stringify(partData), + jsonResponse: true, + }, next), + next => + azureClient.getContainerClient(containerName).getBlobClient(blob) + .getProperties().then(res => { + const tags = JSON.parse(res.metadata.tags); + assert.deepStrictEqual(tags, { key1: 'value1' }); + return next(); + }, assert.ifError), + ], done); + }); }); - describe.skip('Batch Delete Route', function test() { + + describe('Batch Delete Route', function test() { this.timeout(30000); it('should batch delete a local location', done => { let versionId; @@ -2417,7 +2426,7 @@ describe('backbeat routes', () => { method: 'POST', path: '/_/backbeat/batchdelete', requestBody: - `{"Locations":${JSON.stringify(location)}}`, + `{"Locations":${JSON.stringify(location)}}`, jsonResponse: true, }; makeRequest(options, done); @@ -2459,7 +2468,7 @@ describe('backbeat routes', () => { hostname: ipAddress, port: 8000, method: 'POST', - path: '/_/backbeat/batchdelete', + path: `/_/backbeat/batchdelete/${awsBucket}/${awsKey}`, requestBody: reqBody, jsonResponse: true, }; @@ -2504,7 +2513,7 @@ describe('backbeat routes', () => { method: 'POST', path: '/_/backbeat/batchdelete', requestBody: - '{"Locations":' + + '{"Locations":' + '[{"key":"abcdef","dataStoreName":"us-east-1"}]}', jsonResponse: true, }; @@ -2530,259 +2539,269 @@ describe('backbeat routes', () => { }); it('should not put delete tags if the source is not Azure and ' + - 'if-unmodified-since header is not provided', done => { - const awsKey = uuidv4(); - async.series([ - next => - awsClient.putObject({ - Bucket: awsBucket, - Key: awsKey, - }, next), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: '/_/backbeat/batchdelete', - headers: { - 'x-scal-storage-class': awsLocation, - 'x-scal-tags': JSON.stringify({ - 'scal-delete-marker': 'true', - 'scal-delete-service': 'lifecycle-transition', + 'if-unmodified-since header is not provided', done => { + const awsKey = uuidv4(); + async.series([ + next => + awsClient.putObject({ + Bucket: awsBucket, + Key: awsKey, + }, next), + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: '/_/backbeat/batchdelete', + headers: { + 'x-scal-storage-class': awsLocation, + 'x-scal-tags': JSON.stringify({ + 'scal-delete-marker': 'true', + 'scal-delete-service': 'lifecycle-transition', + }), + }, + requestBody: JSON.stringify({ + Locations: [{ + key: awsKey, + dataStoreName: 
awsLocation, + }], }), - }, - requestBody: JSON.stringify({ - Locations: [{ - key: awsKey, - dataStoreName: awsLocation, - }], + jsonResponse: true, + }, next), + next => + awsClient.getObjectTagging({ + Bucket: awsBucket, + Key: awsKey, + }, (err, data) => { + assert.ifError(err); + assert.deepStrictEqual(data.TagSet, []); + next(); }), - jsonResponse: true, - }, next), - next => - awsClient.getObjectTagging({ - Bucket: awsBucket, - Key: awsKey, - }, (err, data) => { - assert.ifError(err); - assert.deepStrictEqual(data.TagSet, []); - next(); - }), - ], done); - }); + ], done); + }); it('should not put tags if the source is not Azure and ' + - 'if-unmodified-since condition is not met', done => { - const awsKey = uuidv4(); - async.series([ - next => - awsClient.putObject({ - Bucket: awsBucket, - Key: awsKey, - }, next), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: '/_/backbeat/batchdelete', - headers: { - 'if-unmodified-since': - 'Sun, 31 Mar 2019 00:00:00 GMT', - 'x-scal-storage-class': awsLocation, - 'x-scal-tags': JSON.stringify({ - 'scal-delete-marker': 'true', - 'scal-delete-service': 'lifecycle-transition', + 'if-unmodified-since condition is not met', done => { + const awsKey = uuidv4(); + async.series([ + next => + awsClient.putObject({ + Bucket: awsBucket, + Key: awsKey, + }, next), + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: `/_/backbeat/batchdelete/${awsBucket}/${awsKey}`, + headers: { + 'if-unmodified-since': + new Date(Date.now() + 86400000).toUTCString(), + 'x-scal-storage-class': awsLocation, + 'x-scal-tags': JSON.stringify({ + 'scal-delete-marker': 'true', + 'scal-delete-service': 'lifecycle-transition', + }), + }, + requestBody: JSON.stringify({ + Locations: [{ + key: awsKey, + dataStoreName: awsLocation, + }], }), - }, - requestBody: JSON.stringify({ - Locations: [{ - key: awsKey, - dataStoreName: awsLocation, - }], + jsonResponse: true, + }, next), + next => + awsClient.getObjectTagging({ + Bucket: awsBucket, + Key: awsKey, + }, (err, data) => { + if (err) { + return next(err); + } + if (data.TagSet.length !== 2) { + return next(new Error(`Expected 2 tags, got ${JSON.stringify(data)}`)); + } + return next(); }), - jsonResponse: true, - }, next), - next => - awsClient.getObjectTagging({ - Bucket: awsBucket, - Key: awsKey, - }, (err, data) => { - assert.ifError(err); - assert.deepStrictEqual(data.TagSet, []); - next(); - }), - ], done); - }); + ], done); + }); it('should put tags if the source is not Azure and ' + - 'if-unmodified-since condition is met', done => { - const awsKey = uuidv4(); - let lastModified; - async.series([ - next => - awsClient.putObject({ - Bucket: awsBucket, - Key: awsKey, - }, next), - next => - awsClient.headObject({ - Bucket: awsBucket, - Key: awsKey, - }, (err, data) => { - if (err) { - return next(err); - } - lastModified = data.LastModified; - return next(); - }), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: `/_/backbeat/batchdelete/${awsBucket}/${awsKey}`, - headers: { - 'if-unmodified-since': lastModified, - 'x-scal-storage-class': awsLocation, - 'x-scal-tags': JSON.stringify({ - 'scal-delete-marker': 'true', - 'scal-delete-service': 'lifecycle-transition', - }), - }, - requestBody: JSON.stringify({ - Locations: [{ - key: awsKey, - dataStoreName: awsLocation, - }], + 
'if-unmodified-since condition is met', done => { + const awsKey = uuidv4(); + let lastModified; + async.series([ + next => + awsClient.putObject({ + Bucket: awsBucket, + Key: awsKey, + }, next), + next => + awsClient.headObject({ + Bucket: awsBucket, + Key: awsKey, + }, (err, data) => { + if (err) { + return next(err); + } + lastModified = data.LastModified; + return next(); }), - jsonResponse: true, - }, next), - next => - awsClient.getObjectTagging({ - Bucket: awsBucket, - Key: awsKey, - }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.TagSet.length, 2); - data.TagSet.forEach(tag => { - const { Key, Value } = tag; - const isValidTag = - Key === 'scal-delete-marker' || - Key === 'scal-delete-service'; - assert(isValidTag); - if (Key === 'scal-delete-marker') { - assert.strictEqual(Value, 'true'); + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: `/_/backbeat/batchdelete/${awsBucket}/${awsKey}`, + headers: { + 'if-unmodified-since': lastModified, + 'x-scal-storage-class': awsLocation, + 'x-scal-tags': JSON.stringify({ + 'scal-delete-marker': 'true', + 'scal-delete-service': 'lifecycle-transition', + }), + }, + requestBody: JSON.stringify({ + Locations: [{ + key: awsKey, + dataStoreName: awsLocation, + }], + }), + jsonResponse: true, + }, next), + next => + awsClient.getObjectTagging({ + Bucket: awsBucket, + Key: awsKey, + }, (err, data) => { + if (err) { + return next(err); } - if (Key === 'scal-delete-service') { - assert.strictEqual( - Value, 'lifecycle-transition'); + if (data.TagSet.length !== 2) { + return next(new Error(`Expected 2 tags, got ${data.TagSet}`)); } - }); - next(); - }), - ], done); - }); + const errors = []; + data.TagSet.forEach(tag => { + const { Key, Value } = tag; + const isValidTag = + Key === 'scal-delete-marker' || + Key === 'scal-delete-service'; + if (!isValidTag) { + errors.push(`Invalid tag: ${Key}`); + } + if (Key === 'scal-delete-marker' && Value !== 'true') { + errors.push(`Invalid tag scal-delete-marker value: ${Value}`); + } + if (Key === 'scal-delete-service' && Value !== 'lifecycle-transition') { + errors.push(`Invalid tag scal-delete-service value: ${Value}`); + } + }); + if (errors.length === 0) { + return next(); + } + return next(new Error(errors.join(', '))); + }), + ], done); + }); - it('should not delete the object if the source is Azure and ' + - 'if-unmodified-since condition is not met', done => { - const blob = uuidv4(); - async.series([ - next => - azureClient.createBlockBlobFromText( - containerName, blob, 'a', null, next), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: - `/_/backbeat/batchdelete/${containerName}/${blob}`, - headers: { - 'if-unmodified-since': - 'Sun, 31 Mar 2019 00:00:00 GMT', - 'x-scal-storage-class': azureLocation, - 'x-scal-tags': JSON.stringify({ - 'scal-delete-marker': 'true', - 'scal-delete-service': 'lifecycle-transition', + it.skip('should not delete the object if the source is Azure and ' + + 'if-unmodified-since condition is not met', done => { + const blob = uuidv4(); + async.series([ + next => + azureClient.getContainerClient(containerName).getBlockBlobClient(blob) + .upload('a', 1, next), + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: + `/_/backbeat/batchdelete/${containerName}/${blob}`, + headers: { + 'if-unmodified-since': + 'Sun, 31 
Mar 2019 00:00:00 GMT', + 'x-scal-storage-class': azureLocation, + 'x-scal-tags': JSON.stringify({ + 'scal-delete-marker': 'true', + 'scal-delete-service': 'lifecycle-transition', + }), + }, + requestBody: JSON.stringify({ + Locations: [{ + key: blob, + dataStoreName: azureLocation, + }], }), - }, - requestBody: JSON.stringify({ - Locations: [{ - key: blob, - dataStoreName: azureLocation, - }], - }), - jsonResponse: true, - }, err => { - if (err && err.statusCode === 412) { - return next(); - } - return next(err); - }), - next => - azureClient.getBlobProperties( - containerName, blob, (err, result) => { - if (err) { - return next(err); + jsonResponse: true, + }, err => { + if (err && err.statusCode === 412) { + return next(); } + return next(err); + }), + next => + azureClient.getContainerClient(containerName).getProperties(blob).then(result => { assert(result); return next(); + }, err => { + next(new Error(`Error from Azure: ${err}`)); }), - ], done); - }); + ], done); + }); - it('should delete the object if the source is Azure and ' + - 'if-unmodified-since condition is met', done => { - const blob = uuidv4(); - let lastModified; - async.series([ - next => - azureClient.createBlockBlobFromText( - containerName, blob, 'a', null, next), - next => - azureClient.getBlobProperties( - containerName, blob, (err, result) => { - if (err) { - return next(err); - } + it.skip('should delete the object if the source is Azure and ' + + 'if-unmodified-since condition is met', done => { + const blob = uuidv4(); + let lastModified; + async.series([ + next => + azureClient.getContainerClient(containerName).getBlockBlobClient(blob) + .upload('a', 1, next), + next => + azureClient.getContainerClient(containerName).getProperties(blob).then(result => { lastModified = result.lastModified; return next(); + }, err => { + next(new Error(`Error from Azure: ${err}`)); }), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: - `/_/backbeat/batchdelete/${containerName}/${blob}`, - headers: { - 'if-unmodified-since': lastModified, - 'x-scal-storage-class': azureLocation, - 'x-scal-tags': JSON.stringify({ - 'scal-delete-marker': 'true', - 'scal-delete-service': 'lifecycle-transition', + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: + `/_/backbeat/batchdelete/${containerName}/${blob}`, + headers: { + 'if-unmodified-since': lastModified, + 'x-scal-storage-class': azureLocation, + 'x-scal-tags': JSON.stringify({ + 'scal-delete-marker': 'true', + 'scal-delete-service': 'lifecycle-transition', + }), + }, + requestBody: JSON.stringify({ + Locations: [{ + key: blob, + dataStoreName: azureLocation, + }], }), - }, - requestBody: JSON.stringify({ - Locations: [{ - key: blob, - dataStoreName: azureLocation, - }], + jsonResponse: true, + }, next), + next => + azureClient.getContainerClient(containerName).getProperties(blob).then(() => { + next(new Error('Azure should return 404')); + }, err => { + next(err.statusCode === 404 ? 
null : err);
                        }),
-                    jsonResponse: true,
-                }, next),
-            next =>
-                azureClient.getBlobProperties(containerName, blob, err => {
-                    assert(err.statusCode === 404);
-                    return next();
-                }),
-            ], done);
-        });
+            ], done);
+        });
     });
 });

From a21ea94931a4eed4418a41c56cbe2206494b44f0 Mon Sep 17 00:00:00 2001
From: williamlardier
Date: Thu, 12 Dec 2024 12:16:27 +0100
Subject: [PATCH 06/10] Disable Ceph tests using AWS, as the bucket no longer
 exists

- To be re-enabled if we want to keep these tests
- We also get a LocationNotFound error from the AWS client, although it is
  properly configured in the location config file...

Issue: CLDSRV-591
---
 tests/multipleBackend/routes/routeBackbeat.js | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tests/multipleBackend/routes/routeBackbeat.js b/tests/multipleBackend/routes/routeBackbeat.js
index 59ac74d48f..06251ba77a 100644
--- a/tests/multipleBackend/routes/routeBackbeat.js
+++ b/tests/multipleBackend/routes/routeBackbeat.js
@@ -1632,7 +1632,7 @@ describe('backbeat routes', () => {
         });
     });
 
-    it('should PUT tags for a non-versioned bucket', function test(done) {
+    itSkipCeph('should PUT tags for a non-versioned bucket', function test(done) {
         this.timeout(10000);
         const bucket = NONVERSIONED_BUCKET;
         const awsBucket =
@@ -2441,7 +2441,8 @@ describe('backbeat routes', () => {
                     }),
             ], done);
         });
-        it('should batch delete a versioned AWS location', done => {
+
+        itSkipCeph('should batch delete a versioned AWS location', done => {
            let versionId;
            const awsKey =
                `${TEST_BUCKET}/batch-delete-test-key-${makeid(8)}`;
@@ -2581,7 +2582,7 @@ describe('backbeat routes', () => {
            ], done);
        });
 
-        it('should not put tags if the source is not Azure and ' +
+        itSkipCeph('should not put tags if the source is not Azure and ' +
        'if-unmodified-since condition is not met', done => {
            const awsKey = uuidv4();
            async.series([
@@ -2630,7 +2631,7 @@ describe('backbeat routes', () => {
            ], done);
        });
 
-        it('should put tags if the source is not Azure and ' +
+        itSkipCeph('should put tags if the source is not Azure and ' +
        'if-unmodified-since condition is met', done => {
            const awsKey = uuidv4();
            let lastModified;

From 20412bcd700c9c94fcff3fc679c284167272fc5b Mon Sep 17 00:00:00 2001
From: williamlardier
Date: Thu, 12 Dec 2024 13:35:03 +0100
Subject: [PATCH 07/10] Make Java test independent from JS tests

- Bucket cleanup might not run during the JS tests, which can be expected
- In that case, the Java test should not assert a strict number of buckets,
  but should start from the number of buckets that already exist...
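For illustration, the assertion pattern used in the updated test is roughly
the following sketch (`getS3Client()` and `bucketName` are the helpers already
defined in JavaTest.java; see the diff below for the exact change):

    // Record how many buckets already exist before the test runs,
    // instead of assuming the account starts empty.
    Object[] initialBuckets = getS3Client().listBuckets().toArray();
    getS3Client().createBucket(bucketName);
    // The listing may contain unrelated buckets, so only assert that it
    // grew by the one bucket this test created.
    Object[] buckets = getS3Client().listBuckets().toArray();
    Assert.assertEquals(buckets.length, initialBuckets.length + 1);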
Issue: CLDSRV-591
---
 .../jaws/src/test/java/com/scality/JavaTest.java | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/tests/functional/jaws/src/test/java/com/scality/JavaTest.java b/tests/functional/jaws/src/test/java/com/scality/JavaTest.java
index 283f94b24c..b5ce9e576c 100644
--- a/tests/functional/jaws/src/test/java/com/scality/JavaTest.java
+++ b/tests/functional/jaws/src/test/java/com/scality/JavaTest.java
@@ -49,14 +49,22 @@ public class JavaTest {
     }
 
     @Test
     public void testCreateBucket() throws Exception {
+        Object[] initialBuckets=getS3Client().listBuckets().toArray();
         getS3Client().createBucket(bucketName);
         Object[] buckets=getS3Client().listBuckets().toArray();
-        Assert.assertEquals(buckets.length,1);
-        Bucket bucket = (Bucket) buckets[0];
-        Assert.assertEquals(bucketName, bucket.getName());
+        Assert.assertEquals(buckets.length, initialBuckets.length + 1);
+        boolean bucketFound = false;
+        for (Object bucketObj : buckets) {
+            Bucket bucket = (Bucket) bucketObj;
+            if (bucketName.equals(bucket.getName())) {
+                bucketFound = true;
+                break;
+            }
+        }
+        Assert.assertTrue("Bucket not found in the list", bucketFound);
         getS3Client().deleteBucket(bucketName);
         Object[] bucketsAfter=getS3Client().listBuckets().toArray();
-        Assert.assertEquals(bucketsAfter.length, 0);
+        Assert.assertEquals(bucketsAfter.length, initialBuckets.length);
     }
 }

From 1e3ad11d012ffeb7d46960c6551ce7ae96851953 Mon Sep 17 00:00:00 2001
From: williamlardier
Date: Thu, 12 Dec 2024 14:09:52 +0100
Subject: [PATCH 08/10] Make JS test independent from existing buckets

- As with the Java test, the scenario should never make assumptions about
  the environment when resources are shared.

Issue: CLDSRV-591
---
 .../aws-node-sdk/test/service/get.js          | 27 ++++++++++++-------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/tests/functional/aws-node-sdk/test/service/get.js b/tests/functional/aws-node-sdk/test/service/get.js
index 3933bff012..bdaff8c98d 100644
--- a/tests/functional/aws-node-sdk/test/service/get.js
+++ b/tests/functional/aws-node-sdk/test/service/get.js
@@ -137,17 +137,24 @@ describeFn('GET Service - AWS.S3.listBuckets', function getService() {
     });
 
     it('should list buckets concurrently', done => {
-        async.times(20, (n, next) => {
-            s3.listBuckets((err, result) => {
-                assert.equal(result.Buckets.length,
-                    createdBuckets.length,
-                    'Created buckets are missing in response');
-                next(err);
+        s3.listBuckets((err, result) => {
+            if (err) {
+                return done(err);
+            }
+            const initialBucketCount = result.Buckets.length;
+            return async.times(20, (n, next) => {
+                s3.listBuckets((err, result) => {
+                    if (err) {
+                        return next(err);
+                    }
+                    assert.equal(result.Buckets.length, initialBucketCount,
+                        'The number of buckets has changed unexpectedly');
+                    return next();
+                });
+            }, err => {
+                assert.ifError(err, `error listing buckets: ${err}`);
+                return done();
             });
-        },
-        err => {
-            assert.ifError(err, `error listing buckets: ${err}`);
-            done();
         });
     });

From 53abbd22e412cf79897e87dabdb4216c6dee8f7e Mon Sep 17 00:00:00 2001
From: williamlardier
Date: Thu, 2 Jan 2025 10:58:11 +0100
Subject: [PATCH 09/10] Update documentation

Issue: CLDSRV-591
---
 docs/BACKBEAT_ROUTES.md     | 5 +++--
 lib/routes/routeBackbeat.js | 4 ++++
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/docs/BACKBEAT_ROUTES.md b/docs/BACKBEAT_ROUTES.md
index fc1775fe9a..95a7645950 100644
--- a/docs/BACKBEAT_ROUTES.md
+++ b/docs/BACKBEAT_ROUTES.md
@@ -6,11 +6,11 @@
 This special router is responsible for handling all the requests that are
related to the Backbeat service. Backbeat may call any of the below APIs to perform operations on either data or s3 objects (metadata). -These route follow the same authorization and validation as the S3 routes: +These routes follow the same authorization and validation as the S3 routes: - Authorize the request with support for Implicit Denies from the IAM service. - Retrieve the bucket and object metadata if applicable. -- Evaluate the S3 Bucket Policies and ACLs before authozing the request. +- Evaluate the S3 Bucket Policies and ACLs before authorizing the request. - Backbeat routes are only authorized given the right permission, currently, `objectReplicate` as a unique permission for all these special routes. - In order to be authorized without S3 Bucket Policy, the caller must be @@ -25,6 +25,7 @@ PUT /_/backbeat/metadata// ``` To edit one existing S3 Object's metadata. +In the CRR case, this is used to put metadata for new objects. ```plaintext GET /_/backbeat/metadata//?versionId= diff --git a/lib/routes/routeBackbeat.js b/lib/routes/routeBackbeat.js index 125fd8059c..d196c7d0a5 100644 --- a/lib/routes/routeBackbeat.js +++ b/lib/routes/routeBackbeat.js @@ -1,3 +1,7 @@ +/* + * The APIs routes are documented under docs/BACKBEAT_ROUTES.md. + */ + const url = require('url'); const async = require('async'); const httpProxy = require('http-proxy'); From 34912f845e3668effbbec33dbf71de3fb93ab1a4 Mon Sep 17 00:00:00 2001 From: williamlardier Date: Thu, 2 Jan 2025 11:04:02 +0100 Subject: [PATCH 10/10] Update documentation use cases Issue: CLDSRV-591 --- docs/BACKBEAT_ROUTES.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/BACKBEAT_ROUTES.md b/docs/BACKBEAT_ROUTES.md index 95a7645950..ef52607f9f 100644 --- a/docs/BACKBEAT_ROUTES.md +++ b/docs/BACKBEAT_ROUTES.md @@ -45,56 +45,56 @@ PUT /_/backbeat/multiplebackenddata//?operation=putobje ``` To put directly to the storage layer the data for an existing S3 Object. -Use case: Cross Region Replication (CRR). +Use case: Zenko Replication. ```plaintext PUT /_/backbeat/multiplebackenddata//?operation=putpart ``` To put directly to the storage layer the data for an existing S3 Object part. -Use case: Cross Region Replication (CRR). +Use case: Zenko Replication. ```plaintext DELETE /_/backbeat/multiplebackenddata//?operation=deleteobject ``` To delete the data for an existing S3 Object. -Use case: Cross Region Replication (CRR). +Use case: Zenko Replication. ```plaintext DELETE /_/backbeat/multiplebackenddata//?operation=abortmpu ``` To abort a multipart upload. -Use case: Cross Region Replication (CRR). +Use case: Zenko Replication. ```plaintext DELETE /_/backbeat/multiplebackenddata//?operation=deleteobjecttagging ``` To delete the tagging for an existing S3 Object. -Use case: Cross Region Replication (CRR). +Use case: Zenko Replication. ```plaintext POST /_/backbeat/multiplebackenddata//?operation=initiatempu ``` To initiate a multipart upload. -Use case: Cross Region Replication (CRR). +Use case: Zenko Replication. ```plaintext POST /_/backbeat/multiplebackenddata//?operation=completempu ``` To complete a multipart upload. -Use case: Cross Region Replication (CRR). +Use case: Zenko Replication. ```plaintext POST /_/backbeat/multiplebackenddata//?operation=puttagging ``` To put the tagging for an existing S3 Object. -Use case: Cross Region Replication (CRR). +Use case: Zenko Replication. ```plaintext GET /_/backbeat/multiplebackendmetadata//