diff --git a/docs/BACKBEAT_ROUTES.md b/docs/BACKBEAT_ROUTES.md new file mode 100644 index 0000000000..ef52607f9f --- /dev/null +++ b/docs/BACKBEAT_ROUTES.md @@ -0,0 +1,153 @@ +# Backbeat routes + +Backbeat routes are implemented in `lib/routes/routeBackbeat.js`. + +This special router is responsible for handling all the requests that are +related to the Backbeat service. Backbeat may call any of the below APIs to +perform operations on either data or s3 objects (metadata). + +These routes follow the same authorization and validation as the S3 routes: + +- Authorize the request with support for Implicit Denies from the IAM service. +- Retrieve the bucket and object metadata if applicable. +- Evaluate the S3 Bucket Policies and ACLs before authorizing the request. + - Backbeat routes are only authorized given the right permission, currently, + `objectReplicate` as a unique permission for all these special routes. + - In order to be authorized without S3 Bucket Policy, the caller must be + authorized by the IAM service and the ACLs. Service accounts and accounts + are allowed. +- Finally, evaluate the quotas before allowing the request to proceed. + +## List of supported APIs + +```plaintext +PUT /_/backbeat/metadata// +``` + +To edit one existing S3 Object's metadata. +In the CRR case, this is used to put metadata for new objects. + +```plaintext +GET /_/backbeat/metadata//?versionId= +``` + +To get one existing S3 Object's metadata. Version id can be specified to get +the metadata of a specific version. + +```plaintext +PUT /_/backbeat/data// +``` + +To put directly to the storage layer the data for an existing S3 Object. + +```plaintext +PUT /_/backbeat/multiplebackenddata//?operation=putobject +``` + +To put directly to the storage layer the data for an existing S3 Object. +Use case: Zenko Replication. + +```plaintext +PUT /_/backbeat/multiplebackenddata//?operation=putpart +``` + +To put directly to the storage layer the data for an existing S3 Object part. 
+Use case: Zenko Replication. + +```plaintext +DELETE /_/backbeat/multiplebackenddata//?operation=deleteobject +``` + +To delete the data for an existing S3 Object. +Use case: Zenko Replication. + +```plaintext +DELETE /_/backbeat/multiplebackenddata//?operation=abortmpu +``` + +To abort a multipart upload. +Use case: Zenko Replication. + +```plaintext +DELETE /_/backbeat/multiplebackenddata//?operation=deleteobjecttagging +``` + +To delete the tagging for an existing S3 Object. +Use case: Zenko Replication. + +```plaintext +POST /_/backbeat/multiplebackenddata//?operation=initiatempu +``` + +To initiate a multipart upload. +Use case: Zenko Replication. + +```plaintext +POST /_/backbeat/multiplebackenddata//?operation=completempu +``` + +To complete a multipart upload. +Use case: Zenko Replication. + +```plaintext +POST /_/backbeat/multiplebackenddata//?operation=puttagging +``` + +To put the tagging for an existing S3 Object. +Use case: Zenko Replication. + +```plaintext +GET /_/backbeat/multiplebackendmetadata// +``` + +To get the metadata for an existing S3 Object. Similar to a S3 HeadObject. +Use case: Cross Region Replication (CRR). + +```plaintext +POST /_/backbeat/batchdelete +``` + +Delete a batch of objects from the storage layer. +Use case: restored S3 Object expiration. + +```plaintext +GET /_/backbeat/lifecycle/?list-type=current +``` + +To list current S3 Object versions from an S3 Bucket. +Use case: lifecycle listings. + +```plaintext +GET /_/backbeat/lifecycle/?list-type=noncurrent +``` + +To list noncurrent S3 Object versions from an S3 Bucket. +Use case: lifecycle listings. + +```plaintext +GET /_/backbeat/lifecycle/?list-type=orphan +``` + +To list delete markers from an S3 Bucket. +Use case: lifecycle listings. + +```plaintext +POST /_/backbeat/index/?operation=add +``` + +To create an index for a bucket. +Use case: MongoDB backend. + +```plaintext +POST /_/backbeat/index/?operation=delete +``` + +To delete an index for a bucket. 
+Use case: MongoDB backend. + +```plaintext +GET /_/backbeat/index/ +``` + +To get the index for a bucket. +Use case: MongoDB backend. diff --git a/lib/api/api.js b/lib/api/api.js index bf0d9f65d4..bb9390a042 100644 --- a/lib/api/api.js +++ b/lib/api/api.js @@ -79,7 +79,66 @@ const monitoringMap = policies.actionMaps.actionMonitoringMapS3; auth.setHandler(vault); +function checkAuthResults(authResults, apiMethod, log) { + let returnTagCount = true; + const isImplicitDeny = {}; + let isOnlyImplicitDeny = true; + if (apiMethod === 'objectGet') { + if (!authResults[0].isAllowed && !authResults[0].isImplicit) { + log.trace('get object authorization denial from Vault'); + return errors.AccessDenied; + } + isImplicitDeny[authResults[0].action] = authResults[0].isImplicit; + if (!authResults[1].isAllowed) { + log.trace('get tagging authorization denial ' + + 'from Vault'); + returnTagCount = false; + } + } else { + for (let i = 0; i < authResults.length; i++) { + isImplicitDeny[authResults[i].action] = true; + if (!authResults[i].isAllowed && !authResults[i].isImplicit) { + // Any explicit deny rejects the current API call + log.trace('authorization denial from Vault'); + return errors.AccessDenied; + } + if (authResults[i].isAllowed) { + // If the action is allowed, the result is not implicit + // Deny. + isImplicitDeny[authResults[i].action] = false; + isOnlyImplicitDeny = false; + } + } + } + // These two APIs cannot use ACLs or Bucket Policies, hence, any + // implicit deny from vault must be treated as an explicit deny. 
+ if ((apiMethod === 'bucketPut' || apiMethod === 'serviceGet') && isOnlyImplicitDeny) { + return errors.AccessDenied; + } + return { returnTagCount, isImplicitDeny }; +} + /* eslint-disable no-param-reassign */ +function handleAuthorizationResults(request, authorizationResults, apiMethod, returnTagCount, log, callback) { + if (authorizationResults) { + const checkedResults = checkAuthResults(authorizationResults, apiMethod, log); + if (checkedResults instanceof Error) { + return callback(checkedResults); + } + returnTagCount = checkedResults.returnTagCount; + request.actionImplicitDenies = checkedResults.isImplicitDeny; + } else { + // create an object of keys apiMethods with all values to false: + // for backward compatibility, all apiMethods are allowed by default + // thus it is explicitly allowed, so implicit deny is false + request.actionImplicitDenies = request.apiMethods.reduce((acc, curr) => { + acc[curr] = false; + return acc; + }, {}); + } + return callback(); +} + const api = { callApiMethod(apiMethod, request, response, log, callback) { // Attach the apiMethod method to the request, so it can used by monitoring in the server @@ -109,7 +168,7 @@ const api = { objectKey: request.objectKey, }); } - let returnTagCount = true; + const returnTagCount = true; const validationRes = validateQueryAndHeaders(request, log); if (validationRes.error) { @@ -152,49 +211,6 @@ const api = { // eslint-disable-next-line no-param-reassign request.apiMethods = apiMethods; - function checkAuthResults(authResults) { - let returnTagCount = true; - const isImplicitDeny = {}; - let isOnlyImplicitDeny = true; - if (apiMethod === 'objectGet') { - // first item checks s3:GetObject(Version) action - if (!authResults[0].isAllowed && !authResults[0].isImplicit) { - log.trace('get object authorization denial from Vault'); - return errors.AccessDenied; - } - // TODO add support for returnTagCount in the bucket policy - // checks - isImplicitDeny[authResults[0].action] = 
authResults[0].isImplicit; - // second item checks s3:GetObject(Version)Tagging action - if (!authResults[1].isAllowed) { - log.trace('get tagging authorization denial ' + - 'from Vault'); - returnTagCount = false; - } - } else { - for (let i = 0; i < authResults.length; i++) { - isImplicitDeny[authResults[i].action] = true; - if (!authResults[i].isAllowed && !authResults[i].isImplicit) { - // Any explicit deny rejects the current API call - log.trace('authorization denial from Vault'); - return errors.AccessDenied; - } - if (authResults[i].isAllowed) { - // If the action is allowed, the result is not implicit - // Deny. - isImplicitDeny[authResults[i].action] = false; - isOnlyImplicitDeny = false; - } - } - } - // These two APIs cannot use ACLs or Bucket Policies, hence, any - // implicit deny from vault must be treated as an explicit deny. - if ((apiMethod === 'bucketPut' || apiMethod === 'serviceGet') && isOnlyImplicitDeny) { - return errors.AccessDenied; - } - return { returnTagCount, isImplicitDeny }; - } - return async.waterfall([ next => auth.server.doAuth( request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => { @@ -267,27 +283,18 @@ const api = { return next(null, userInfo, authResultsWithTags, streamingV4Params, infos); }, ), + (userInfo, authorizationResults, streamingV4Params, infos, next) => + handleAuthorizationResults(request, authorizationResults, apiMethod, returnTagCount, log, err => { + if (err) { + return next(err); + } + return next(null, userInfo, authorizationResults, streamingV4Params, infos); + }), ], (err, userInfo, authorizationResults, streamingV4Params, infos) => { if (err) { return callback(err); } request.accountQuotas = infos?.accountQuota; - if (authorizationResults) { - const checkedResults = checkAuthResults(authorizationResults); - if (checkedResults instanceof Error) { - return callback(checkedResults); - } - returnTagCount = checkedResults.returnTagCount; - request.actionImplicitDenies = 
checkedResults.isImplicitDeny; - } else { - // create an object of keys apiMethods with all values to false: - // for backward compatibility, all apiMethods are allowed by default - // thus it is explicitly allowed, so implicit deny is false - request.actionImplicitDenies = apiMethods.reduce((acc, curr) => { - acc[curr] = false; - return acc; - }, {}); - } const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5, (hook, done) => hook(err, done), () => callback(err, ...results)); @@ -372,6 +379,8 @@ const api = { serviceGet, websiteGet: website, websiteHead: website, + checkAuthResults, + handleAuthorizationResults, }; module.exports = api; diff --git a/lib/routes/routeBackbeat.js b/lib/routes/routeBackbeat.js index d1b6743a9a..d196c7d0a5 100644 --- a/lib/routes/routeBackbeat.js +++ b/lib/routes/routeBackbeat.js @@ -1,3 +1,7 @@ +/* + * The APIs routes are documented under docs/BACKBEAT_ROUTES.md. + */ + const url = require('url'); const async = require('async'); const httpProxy = require('http-proxy'); @@ -23,10 +27,10 @@ const locationStorageCheck = require('../api/apiUtils/object/locationStorageCheck'); const { dataStore } = require('../api/apiUtils/object/storeObject'); const prepareRequestContexts = require( -'../api/apiUtils/authorization/prepareRequestContexts'); + '../api/apiUtils/authorization/prepareRequestContexts'); const { decodeVersionId } = require('../api/apiUtils/object/versioning'); const locationKeysHaveChanged - = require('../api/apiUtils/object/locationKeysHaveChanged'); + = require('../api/apiUtils/object/locationKeysHaveChanged'); const { standardMetadataValidateBucketAndObj, metadataGetObject } = require('../metadata/metadataUtils'); const { config } = require('../Config'); @@ -39,6 +43,7 @@ const { listLifecycleNonCurrents } = require('../api/backbeat/listLifecycleNonCu const { listLifecycleOrphanDeleteMarkers } = require('../api/backbeat/listLifecycleOrphanDeleteMarkers'); const { objectDeleteInternal } = 
require('../api/objectDelete'); const { validateQuotas } = require('../api/apiUtils/quotas/quotaUtils'); +const { handleAuthorizationResults } = require('../api/api'); const { CURRENT_TYPE, NON_CURRENT_TYPE, ORPHAN_DM_TYPE } = constants.lifecycleListing; @@ -127,7 +132,7 @@ function _getRequestPayload(req, cb) { payload.push(chunk); payloadLen += chunk.length; }).on('error', cb) - .on('end', () => cb(null, Buffer.concat(payload, payloadLen).toString())); + .on('end', () => cb(null, Buffer.concat(payload, payloadLen).toString())); } function _checkMultipleBackendRequest(request, log) { @@ -194,7 +199,7 @@ function _checkMultipleBackendRequest(request, log) { const location = locationConstraints[headers['x-scal-storage-class']]; const storageTypeList = storageType.split(','); const isValidLocation = location && - storageTypeList.includes(location.type); + storageTypeList.includes(location.type); if (!isValidLocation) { errMessage = 'invalid request: invalid location constraint in request'; log.debug(errMessage, { @@ -300,51 +305,21 @@ function handleTaggingOperation(request, response, type, dataStoreVersionId, } } return dataClient.objectTagging(type, request.objectKey, - request.bucketName, objectMD, log, err => { - if (err) { - log.error(`error during object tagging: ${type}`, { - error: err, - method: 'handleTaggingOperation', - }); - return callback(err); - } - const dataRetrievalInfo = { - versionId: dataStoreVersionId, - }; - return _respond(response, dataRetrievalInfo, log, callback); - }); + request.bucketName, objectMD, log, err => { + if (err) { + log.error(`error during object tagging: ${type}`, { + error: err, + method: 'handleTaggingOperation', + }); + return callback(err); + } + const dataRetrievalInfo = { + versionId: dataStoreVersionId, + }; + return _respond(response, dataRetrievalInfo, log, callback); + }); } -/* -PUT /_/backbeat/metadata// -GET /_/backbeat/metadata//?versionId= -PUT /_/backbeat/data// -PUT /_/backbeat/multiplebackenddata// - 
?operation=putobject -PUT /_/backbeat/multiplebackenddata// - ?operation=putpart -DELETE /_/backbeat/multiplebackenddata// - ?operation=deleteobject -DELETE /_/backbeat/multiplebackenddata// - ?operation=abortmpu -DELETE /_/backbeat/multiplebackenddata// - ?operation=deleteobjecttagging -POST /_/backbeat/multiplebackenddata// - ?operation=initiatempu -POST /_/backbeat/multiplebackenddata// - ?operation=completempu -POST /_/backbeat/multiplebackenddata// - ?operation=puttagging -GET /_/backbeat/multiplebackendmetadata// -POST /_/backbeat/batchdelete -GET /_/backbeat/lifecycle/?list-type=current -GET /_/backbeat/lifecycle/?list-type=noncurrent -GET /_/backbeat/lifecycle/?list-type=orphan -POST /_/backbeat/index/?operation=add -POST /_/backbeat/index/?operation=delete -DELETE /_/backbeat/index/ -*/ - function _getLastModified(locations, log, cb) { const reqUids = log.getSerializedUids(); return dataClient.head(locations, reqUids, (err, data) => { @@ -586,7 +561,8 @@ function putMetadata(request, response, bucketInfo, objMd, log, callback) { } log.trace('putting object version', { - objectKey: request.objectKey, omVal, options }); + objectKey: request.objectKey, omVal, options + }); return metadata.putObjectMD(bucketName, objectKey, omVal, options, log, (err, md) => { if (err) { @@ -611,26 +587,26 @@ function putMetadata(request, response, bucketInfo, objMd, log, callback) { objectKey, }); async.eachLimit(objMd.location, 5, - (loc, next) => dataWrapper.data.delete(loc, log, err => { - if (err) { - log.warn('error removing old data location key', { + (loc, next) => dataWrapper.data.delete(loc, log, err => { + if (err) { + log.warn('error removing old data location key', { + bucketName, + objectKey, + locationKey: loc, + error: err.message, + }); + } + // do not forward the error to let other + // locations be deleted + next(); + }), + () => { + log.debug('done removing old data locations', { + method: 'putMetadata', bucketName, objectKey, - locationKey: loc, - error: 
err.message, }); - } - // do not forward the error to let other - // locations be deleted - next(); - }), - () => { - log.debug('done removing old data locations', { - method: 'putMetadata', - bucketName, - objectKey, }); - }); } return _respond(response, md, log, callback); }); @@ -939,7 +915,7 @@ function completeMultipartUpload(request, response, log, callback) { // lib/api/completeMultipartUpload.js. const { key, dataStoreType, dataStoreVersionId } = - retrievalInfo; + retrievalInfo; let size; let dataStoreETag; if (skipMpuPartProcessing(retrievalInfo)) { @@ -947,7 +923,7 @@ function completeMultipartUpload(request, response, log, callback) { dataStoreETag = retrievalInfo.eTag; } else { const { aggregateSize, aggregateETag } = - generateMpuAggregateInfo(parts); + generateMpuAggregateInfo(parts); size = aggregateSize; dataStoreETag = aggregateETag; } @@ -1336,32 +1312,39 @@ const indexEntrySchema = joi.object({ const indexingSchema = joi.array().items(indexEntrySchema).min(1); -function routeIndexingAPIs(request, response, userInfo, log) { +function respondToRequest(err, response, log, callback) { + responseJSONBody(err, null, response, log); + // The callback is optional, as it is only used for testing purposes + // but value may be set to non-undefined or null due to the arsenal + // routes implementation + if (callback && typeof callback === 'function') { + return callback(err); + } + return undefined; +} + +function routeIndexingAPIs(request, response, userInfo, log, callback) { const route = backbeatRoutes[request.method][request.resourceType]; if (!['GET', 'POST'].includes(request.method)) { - return responseJSONBody(errors.MethodNotAllowed, null, response, log); + return respondToRequest(errors.MethodNotAllowed, response, log, callback); } if (request.method === 'GET') { - return route(request, response, userInfo, log, err => { - if (err) { - return responseJSONBody(err, null, response, log); - } - return undefined; - }); + return route(request, response, 
userInfo, log, err => + respondToRequest(err, response, log, callback)); } const op = request.query.operation; if (!op || typeof route[op] !== 'function') { log.error('Invalid operataion parameter', { operation: op }); - return responseJSONBody(errors.BadRequest, null, response, log); + return respondToRequest(errors.BadRequest, response, log, callback); } return _getRequestPayload(request, (err, payload) => { if (err) { - return responseJSONBody(err, null, response, log); + return respondToRequest(err, response, log, callback); } let parsedIndex; @@ -1370,20 +1353,58 @@ function routeIndexingAPIs(request, response, userInfo, log) { parsedIndex = joi.attempt(JSON.parse(payload), indexingSchema, 'invalid payload'); } catch (err) { log.error('Unable to parse index request body', { error: err }); - return responseJSONBody(errors.BadRequest, null, response, log); + return respondToRequest(errors.BadRequest, response, log, callback); } - return route[op](parsedIndex, request, response, userInfo, log, err => { + return route[op](parsedIndex, request, response, userInfo, log, err => + respondToRequest(err, response, log, callback)); + }); +} + +function routeBackbeatAPIProxy(request, response, requestContexts, apiMethods, log, callback) { + const path = request.url.replace('/_/backbeat/api', '/_/'); + const { host, port } = config.backbeat; + const target = `http://${host}:${port}${path}`; + + return async.waterfall([ + next => auth.server.doAuth(request, log, (err, userInfo, authorizationResults) => { if (err) { - return responseJSONBody(err, null, response, log); + log.debug('authentication error', { + error: err, + method: request.method, + bucketName: request.bucketName, + objectKey: request.objectKey, + }); } - return undefined; + return next(err, userInfo, authorizationResults); + }, 's3', requestContexts), + (userInfo, authorizationResults, next) => handleAuthorizationResults( + request, authorizationResults, apiMethods[0], undefined, log, err => next(err, 
userInfo)), + ], (err, userInfo) => { + if (err) { + return respondToRequest(err, response, log, callback); + } + // FIXME for now, any authenticated user can access API + // routes. We should introduce admin accounts or accounts + // with admin privileges, and restrict access to those + // only. + if (userInfo.getCanonicalID() === constants.publicId) { + log.debug('unauthenticated access to API routes', { + method: request.method, + bucketName: request.bucketName, + objectKey: request.objectKey, + }); + return respondToRequest(errors.AccessDenied, response, log, callback); + } + return backbeatProxy.web(request, response, { target }, err => { + log.error('error proxying request to api server', + { error: err.message }); + return respondToRequest(errors.ServiceUnavailable, response, log, callback); }); }); } - -function routeBackbeat(clientIP, request, response, log) { +function routeBackbeat(clientIP, request, response, log, callback) { // Attach the apiMethod method to the request, so it can used by monitoring in the server // eslint-disable-next-line no-param-reassign request.apiMethod = 'routeBackbeat'; @@ -1411,52 +1432,21 @@ function routeBackbeat(clientIP, request, response, log) { // eslint-disable-next-line no-param-reassign request.finalizerHooks = []; + // Extract all the _apiMethods and store them in an array + const apiMethods = requestContexts ? 
requestContexts.map(context => context._apiMethod) : []; + // Attach the names to the current request + // eslint-disable-next-line no-param-reassign + request.apiMethods = apiMethods; + // proxy api requests to Backbeat API server if (request.resourceType === 'api') { if (!config.backbeat) { log.debug('unable to proxy backbeat api request', { backbeatConfig: config.backbeat, }); - return responseJSONBody(errors.MethodNotAllowed, null, response, - log); + return respondToRequest(errors.MethodNotAllowed, response, log, callback); } - const path = request.url.replace('/_/backbeat/api', '/_/'); - const { host, port } = config.backbeat; - const target = `http://${host}:${port}${path}`; - - // TODO CLDSRV-591: shall we use the authorization results here? - return auth.server.doAuth(request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => { - if (err) { - log.debug('authentication error', { - error: err, - method: request.method, - bucketName: request.bucketName, - objectKey: request.objectKey, - }); - return responseJSONBody(err, null, response, log); - } - // eslint-disable-next-line no-param-reassign - request.accountQuotas = infos?.accountQuota; - // FIXME for now, any authenticated user can access API - // routes. We should introduce admin accounts or accounts - // with admin privileges, and restrict access to those - // only. 
- if (userInfo.getCanonicalID() === constants.publicId) { - log.debug('unauthenticated access to API routes', { - method: request.method, - bucketName: request.bucketName, - objectKey: request.objectKey, - }); - return responseJSONBody( - errors.AccessDenied, null, response, log); - } - return backbeatProxy.web(request, response, { target }, err => { - log.error('error proxying request to api server', - { error: err.message }); - return responseJSONBody(errors.ServiceUnavailable, null, - response, log); - }); - }, 's3', requestContexts); + return routeBackbeatAPIProxy(request, response, requestContexts, apiMethods, log, callback); } const useMultipleBackend = @@ -1485,48 +1475,49 @@ function routeBackbeat(clientIP, request, response, log) { resourceType: request.resourceType, query: request.query, }); - return responseJSONBody(errors.MethodNotAllowed, null, response, log); + return respondToRequest(errors.MethodNotAllowed, response, log, callback); } - return async.waterfall([next => auth.server.doAuth( - // TODO CLDSRV-591: shall we use the authorization results here? 
- request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => { - if (err) { - log.debug('authentication error', { - error: err, - method: request.method, - bucketName: request.bucketName, - objectKey: request.objectKey, - }); - } - // eslint-disable-next-line no-param-reassign - request.accountQuotas = infos?.accountQuota; - return next(err, userInfo); - }, 's3', requestContexts), + const isObjectRequest = _isObjectRequest(request); + + return async.waterfall([ + next => auth.server.doAuth( + request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => { + if (err) { + log.debug('authentication error', { + error: err, + method: request.method, + bucketName: request.bucketName, + objectKey: request.objectKey, + }); + } + // eslint-disable-next-line no-param-reassign + request.accountQuotas = infos?.accountQuota; + return next(err, userInfo, authorizationResults); + }, 's3', requestContexts), + (userInfo, authorizationResults, next) => + handleAuthorizationResults(request, authorizationResults, apiMethods[0], {}, log, err => + next(err, userInfo)), (userInfo, next) => { // TODO: understand why non-object requests (batchdelete) were not authenticated - if (!_isObjectRequest(request)) { + if (!isObjectRequest) { if (userInfo.getCanonicalID() === constants.publicId) { log.debug(`unauthenticated access to backbeat ${request.resourceType} routes`, { method: request.method, bucketName: request.bucketName, objectKey: request.objectKey, }); - return responseJSONBody( - errors.AccessDenied, null, response, log); + return next(errors.AccessDenied); } if (request.resourceType === 'index') { - return routeIndexingAPIs(request, response, userInfo, log); + return routeIndexingAPIs(request, response, userInfo, log, + err => next(err, null, null)); } const route = backbeatRoutes[request.method][request.resourceType]; - return route(request, response, userInfo, log, err => { - if (err) { - return responseJSONBody(err, null, response, log); - 
} - return undefined; - }); + return route(request, response, userInfo, log, + err => next(err, null, null)); } const decodedVidResult = decodeVersionId(request.query); @@ -1535,7 +1526,7 @@ function routeBackbeat(clientIP, request, response, log) { versionId: request.query.versionId, error: decodedVidResult, }); - return responseJSONBody(errors.InvalidArgument, null, response, log); + return next(errors.InvalidArgument); } const versionId = decodedVidResult; if (useMultipleBackend) { @@ -1550,9 +1541,14 @@ function routeBackbeat(clientIP, request, response, log) { requestType: request.apiMethods || 'ReplicateObject', request, }; - return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log, next); + return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log, + (err, bucketMd, objMd) => next(err, bucketMd, objMd)); }, (bucketInfo, objMd, next) => { + // Function was already called + if (!isObjectRequest) { + return next(); + } if (!useMultipleBackend) { return backbeatRoutes[request.method][request.resourceType]( request, response, bucketInfo, objMd, log, next); @@ -1561,28 +1557,37 @@ function routeBackbeat(clientIP, request, response, log) { return backbeatRoutes[request.method][request.resourceType]( request, response, log, next); } - return backbeatRoutes[request.method][request.resourceType] - [request.query.operation](request, response, log, next); + return backbeatRoutes[request.method][request.resourceType][request.query.operation]( + request, response, log, next); }], - err => async.forEachLimit( - // Finalizer hooks are used in a quota context and ensure consistent - // metrics in case of API errors. No operation required if the API - // completed successfully. 
- request.finalizerHooks, - 5, - (hook, done) => hook(err, done), - () => { - if (err) { - return responseJSONBody(err, null, response, log); - } - log.debug('backbeat route response sent successfully', - { method: request.method, - bucketName: request.bucketName, - objectKey: request.objectKey }); - return undefined; - }, - )); + err => { + if (err) { + return async.forEachLimit( + // Finalizer hooks are used in a quota context and ensure consistent + // metrics in case of API errors. No operation required if the API + // completed successfully. + request.finalizerHooks, + 5, + (hook, done) => hook(err, done), + () => { + if (err) { + return respondToRequest(err, response, log, callback); + } + log.debug('backbeat route response sent successfully', { + method: request.method, + bucketName: request.bucketName, + objectKey: request.objectKey + }); + return respondToRequest(null, response, log, callback); + }, + ); + } + return respondToRequest(null, response, log, callback); + }); } -module.exports = routeBackbeat; +module.exports = { + backbeatRoutes, + routeBackbeat, +}; diff --git a/lib/utilities/internalHandlers.js b/lib/utilities/internalHandlers.js index 96af32bc05..0d6537946b 100644 --- a/lib/utilities/internalHandlers.js +++ b/lib/utilities/internalHandlers.js @@ -1,4 +1,4 @@ -const routeBackbeat = require('../routes/routeBackbeat'); +const { routeBackbeat } = require('../routes/routeBackbeat'); const routeMetadata = require('../routes/routeMetadata'); const routeWorkflowEngineOperator = require('../routes/routeWorkflowEngineOperator'); diff --git a/tests/functional/aws-node-sdk/lib/utility/bucket-util.js b/tests/functional/aws-node-sdk/lib/utility/bucket-util.js index eede136437..c5ae7952a7 100644 --- a/tests/functional/aws-node-sdk/lib/utility/bucket-util.js +++ b/tests/functional/aws-node-sdk/lib/utility/bucket-util.js @@ -1,6 +1,4 @@ const bluebird = require('bluebird'); -const AWS = require('aws-sdk'); -AWS.config.logger = console; const { S3 } = 
require('aws-sdk'); const projectFixture = require('../fixtures/project'); const getConfig = require('../../test/support/config'); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAzure.js b/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAzure.js index 22e9d150fb..2fdcb7ffbf 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAzure.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/delete/deleteAzure.js @@ -79,7 +79,7 @@ function testSuite() { assert.equal(err, null, 'Expected success ' + `but got error ${err}`); setTimeout(() => azureClient.getContainerClient(azureContainerName) - .getProperties(keyName) + .getBlobClient(keyName).getProperties() .then(() => assert.fail('Expected error'), err => { assert.strictEqual(err.statusCode, 404); assert.strictEqual(err.code, 'NotFound'); @@ -112,13 +112,13 @@ function testSuite() { assert.equal(err, null, 'Expected success ' + `but got error ${err}`); setTimeout(() => - azureClient.getContainerClient(azureContainerName) - .getProperties(`${azureContainerName}/${this.test.azureObject}`) - .then(() => assert.fail('Expected error'), err => { - assert.strictEqual(err.statusCode, 404); - assert.strictEqual(err.code, 'NotFound'); - return done(); - }), azureTimeout); + azureClient.getContainerClient(azureContainerName) + .getBlobClient(`${azureContainerName}/${this.test.azureObject}`).getProperties() + .then(() => assert.fail('Expected error'), err => { + assert.strictEqual(err.statusCode, 404); + assert.strictEqual(err.code, 'NotFound'); + return done(); + }), azureTimeout); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/azureAbortMPU.js b/tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/azureAbortMPU.js index 92c5e4c1d5..5519682ca0 100644 --- a/tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/azureAbortMPU.js +++ b/tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/azureAbortMPU.js 
@@ -18,7 +18,7 @@ let bucketUtil; let s3; function azureCheck(container, key, expected, cb) { - azureClient.getContainerClient(container).getProperties(key).then(res => { + azureClient.getContainerClient(container).getBlobClient(key).getProperties().then(res => { assert.ok(!expected.error); const convertedMD5 = convertMD5(res.contentSettings.contentMD5); assert.strictEqual(convertedMD5, expectedMD5); diff --git a/tests/functional/aws-node-sdk/test/service/get.js b/tests/functional/aws-node-sdk/test/service/get.js index 3933bff012..bdaff8c98d 100644 --- a/tests/functional/aws-node-sdk/test/service/get.js +++ b/tests/functional/aws-node-sdk/test/service/get.js @@ -137,17 +137,24 @@ describeFn('GET Service - AWS.S3.listBuckets', function getService() { }); it('should list buckets concurrently', done => { - async.times(20, (n, next) => { - s3.listBuckets((err, result) => { - assert.equal(result.Buckets.length, - createdBuckets.length, - 'Created buckets are missing in response'); - next(err); + s3.listBuckets((err, result) => { + if (err) { + return done(err); + } + const initialBucketCount = result.Buckets.length; + return async.times(20, (n, next) => { + s3.listBuckets((err, result) => { + if (err) { + return next(err); + } + assert.equal(result.Buckets.length, initialBucketCount, + 'The number of buckets has changed unexpectedly'); + return next(); + }); + }, err => { + assert.ifError(err, `error listing buckets: ${err}`); + return done(); }); - }, - err => { - assert.ifError(err, `error listing buckets: ${err}`); - done(); }); }); diff --git a/tests/functional/jaws/src/test/java/com/scality/JavaTest.java b/tests/functional/jaws/src/test/java/com/scality/JavaTest.java index 283f94b24c..b5ce9e576c 100644 --- a/tests/functional/jaws/src/test/java/com/scality/JavaTest.java +++ b/tests/functional/jaws/src/test/java/com/scality/JavaTest.java @@ -49,14 +49,22 @@ public class JavaTest { } @Test public void testCreateBucket() throws Exception { + Object[] 
initialBuckets=getS3Client().listBuckets().toArray(); getS3Client().createBucket(bucketName); Object[] buckets=getS3Client().listBuckets().toArray(); - Assert.assertEquals(buckets.length,1); - Bucket bucket = (Bucket) buckets[0]; - Assert.assertEquals(bucketName, bucket.getName()); + Assert.assertEquals(buckets.length, initialBuckets.length + 1); + boolean bucketFound = false; + for (Object bucketObj : buckets) { + Bucket bucket = (Bucket) bucketObj; + if (bucketName.equals(bucket.getName())) { + bucketFound = true; + break; + } + } + Assert.assertTrue("Bucket not found in the list", bucketFound); getS3Client().deleteBucket(bucketName); Object[] bucketsAfter=getS3Client().listBuckets().toArray(); - Assert.assertEquals(bucketsAfter.length, 0); + Assert.assertEquals(bucketsAfter.length, initialBuckets.length); } } diff --git a/tests/multipleBackend/routes/routeBackbeat.js b/tests/multipleBackend/routes/routeBackbeat.js index 1302b548b5..06251ba77a 100644 --- a/tests/multipleBackend/routes/routeBackbeat.js +++ b/tests/multipleBackend/routes/routeBackbeat.js @@ -9,7 +9,7 @@ const versionIdUtils = versioning.VersionID; const { makeid } = require('../../unit/helpers'); const { makeRequest, makeBackbeatRequest } = require('../../functional/raw-node/utils/makeRequest'); const BucketUtility = - require('../../functional/aws-node-sdk/lib/utility/bucket-util'); + require('../../functional/aws-node-sdk/lib/utility/bucket-util'); const { describeSkipIfNotMultipleOrCeph, itSkipCeph, @@ -19,7 +19,7 @@ const { getAzureClient, } = require('../../functional/aws-node-sdk/test/multipleBackend/utils'); const { getRealAwsConfig } = - require('../../functional/aws-node-sdk/test/support/awsConfig'); + require('../../functional/aws-node-sdk/test/support/awsConfig'); const { getCredentials } = require('../../functional/aws-node-sdk/test/support/credentials'); const { config } = require('../../../lib/Config'); @@ -48,14 +48,14 @@ const testKey = 'testkey'; const testKeyUTF8 = '䆩鈁櫨㟔罳'; const 
testData = 'testkey data'; const testDataMd5 = crypto.createHash('md5') - .update(testData, 'utf-8') - .digest('hex'); + .update(testData, 'utf-8') + .digest('hex'); const emptyContentsMd5 = 'd41d8cd98f00b204e9800998ecf8427e'; const testMd = { 'md-model-version': 2, 'owner-display-name': 'Bart', 'owner-id': ('79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be'), + 'fd6e09d98eacf8f8d5218e7cd47ef2be'), 'last-modified': '2017-05-15T20:32:40.032Z', 'content-length': testData.length, 'content-md5': testDataMd5, @@ -87,7 +87,7 @@ const testMd = { const nonVersionedTestMd = { 'owner-display-name': 'Bart', 'owner-id': ('79a59df900b949e55d96a1e698fbaced' + - 'fd6e09d98eacf8f8d5218e7cd47ef2be'), + 'fd6e09d98eacf8f8d5218e7cd47ef2be'), 'content-length': testData.length, 'content-md5': testDataMd5, 'x-amz-version-id': 'null', @@ -212,12 +212,12 @@ function getMetadataToPut(putDataResponse) { // Reproduce what backbeat does to update target metadata mdToPut.location = JSON.parse(putDataResponse.body); ['x-amz-server-side-encryption', - 'x-amz-server-side-encryption-aws-kms-key-id', - 'x-amz-server-side-encryption-customer-algorithm'].forEach(headerName => { - if (putDataResponse.headers[headerName]) { - mdToPut[headerName] = putDataResponse.headers[headerName]; - } - }); + 'x-amz-server-side-encryption-aws-kms-key-id', + 'x-amz-server-side-encryption-customer-algorithm'].forEach(headerName => { + if (putDataResponse.headers[headerName]) { + mdToPut[headerName] = putDataResponse.headers[headerName]; + } + }); return mdToPut; } @@ -270,7 +270,7 @@ describe('backbeat routes', () => { .then(() => s3.deleteBucket({ Bucket: TEST_ENCRYPTED_BUCKET }).promise()) .then(() => bucketUtil.empty(NONVERSIONED_BUCKET)) .then(() => s3.deleteBucket({ Bucket: NONVERSIONED_BUCKET }).promise()) - .then(() => done(), err => done(err)) + .then(() => done(), () => done()) ); describe('null version', () => { @@ -389,13 +389,13 @@ describe('backbeat routes', () => { enableVersioning: 
next => s3.putBucketVersioning( { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, next), putObjectAgain: next => s3.putObject( - { Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); - } - expectedVersionId = data.VersionId; - return next(); - }), + { Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + expectedVersionId = data.VersionId; + return next(); + }), getMetadata: next => makeBackbeatRequest({ method: 'GET', resourceType: 'metadata', @@ -873,7 +873,7 @@ describe('backbeat routes', () => { // give some time for the async deletes to complete return setTimeout(() => checkVersionData(s3, bucket, keyName, expectedVersionId, testData, done), - 1000); + 1000); }); }); @@ -998,138 +998,138 @@ describe('backbeat routes', () => { }); it('should update null version if versioning suspended and null version has a version id and' + - 'put object afterward', done => { - let objMD; - return async.series([ - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { + 'put object afterward', done => { + let objMD; + return async.series([ + next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => makeBackbeatRequest({ + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, (err, data) => { + if (err) { + return next(err); + } 
+ const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + }), + next => makeBackbeatRequest({ + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, next), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], (err, data) => { if (err) { - return next(err); - } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); + return done(err); } - objMD = result; - return next(); - }), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } - const headObjectRes = data[5]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert(!headObjectRes.StorageClass); + const headObjectRes = data[5]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert(!headObjectRes.StorageClass); - const listObjectVersionsRes = data[6]; - const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); - assert.strictEqual(Versions.length, 1); + const listObjectVersionsRes = data[6]; + const { DeleteMarkers, Versions } = listObjectVersionsRes; + assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(Versions.length, 1); - 
const currentVersion = Versions[0]; - assert(currentVersion.IsLatest); - assertVersionHasNotBeenUpdated(currentVersion, 'null'); - return done(); + const currentVersion = Versions[0]; + assert(currentVersion.IsLatest); + assertVersionHasNotBeenUpdated(currentVersion, 'null'); + return done(); + }); }); - }); it('should update null version if versioning suspended and null version has a version id and' + - 'put version afterward', done => { - let objMD; - let expectedVersionId; - return async.series([ - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { - if (err) { - return next(err); - } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); - } - objMD = result; - return next(); - }), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + 'put version afterward', done => { + let objMD; + let expectedVersionId; + return async.series([ + next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => makeBackbeatRequest({ + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + 
}, + authCredentials: backbeatAuthCredentials, + }, (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + }), + next => makeBackbeatRequest({ + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, next), + next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, + next), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + expectedVersionId = data.VersionId; + return next(); + }), + next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], (err, data) => { if (err) { - return next(err); + return done(err); } - expectedVersionId = data.VersionId; - return next(); - }), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } - const headObjectRes = data[6]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - assert.strictEqual(headObjectRes.StorageClass, storageClass); + const headObjectRes = data[6]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert.strictEqual(headObjectRes.StorageClass, storageClass); - const listObjectVersionsRes = data[7]; - const { Versions } = listObjectVersionsRes; - assert.strictEqual(Versions.length, 2); + const listObjectVersionsRes = data[7]; + const { Versions } = listObjectVersionsRes; + assert.strictEqual(Versions.length, 2); - const [currentVersion] = Versions.filter(v => v.IsLatest); - assertVersionHasNotBeenUpdated(currentVersion, expectedVersionId); + const [currentVersion] = 
Versions.filter(v => v.IsLatest); + assertVersionHasNotBeenUpdated(currentVersion, expectedVersionId); - const [nonCurrentVersion] = Versions.filter(v => !v.IsLatest); - assertVersionIsNullAndUpdated(nonCurrentVersion); - return done(); + const [nonCurrentVersion] = Versions.filter(v => !v.IsLatest); + assertVersionIsNullAndUpdated(nonCurrentVersion); + return done(); + }); }); - }); it('should update non-current null version if versioning suspended', done => { let expectedVersionId; @@ -1277,78 +1277,78 @@ describe('backbeat routes', () => { }); it('should update current null version if versioning suspended and put a null version ' + - 'afterwards', done => { - let objMD; - let deletedVersionId; - return async.series([ - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, - next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { - if (err) { - return next(err); - } - deletedVersionId = data.VersionId; - return next(); - }), - next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, - next), - next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: deletedVersionId }, next), - next => makeBackbeatRequest({ - method: 'GET', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - }, (err, data) => { + 'afterwards', done => { + let objMD; + let deletedVersionId; + return async.series([ + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, + next), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => { + if (err) { + return next(err); + } + 
deletedVersionId = data.VersionId; + return next(); + }), + next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, + next), + next => s3.deleteObject({ Bucket: bucket, Key: keyName, VersionId: deletedVersionId }, next), + next => makeBackbeatRequest({ + method: 'GET', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + }, (err, data) => { + if (err) { + return next(err); + } + const { error, result } = updateStorageClass(data, storageClass); + if (error) { + return next(error); + } + objMD = result; + return next(); + }), + next => makeBackbeatRequest({ + method: 'PUT', + resourceType: 'metadata', + bucket, + objectKey: keyName, + queryObj: { + versionId: 'null', + }, + authCredentials: backbeatAuthCredentials, + requestBody: objMD, + }, next), + next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), + next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), + next => s3.listObjectVersions({ Bucket: bucket }, next), + ], (err, data) => { if (err) { - return next(err); - } - const { error, result } = updateStorageClass(data, storageClass); - if (error) { - return next(error); + return done(err); } - objMD = result; - return next(); - }), - next => makeBackbeatRequest({ - method: 'PUT', - resourceType: 'metadata', - bucket, - objectKey: keyName, - queryObj: { - versionId: 'null', - }, - authCredentials: backbeatAuthCredentials, - requestBody: objMD, - }, next), - next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next), - next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next), - next => s3.listObjectVersions({ Bucket: bucket }, next), - ], (err, data) => { - if (err) { - return done(err); - } - const headObjectRes = data[8]; - assert.strictEqual(headObjectRes.VersionId, 'null'); - 
assert(!headObjectRes.StorageClass); + const headObjectRes = data[8]; + assert.strictEqual(headObjectRes.VersionId, 'null'); + assert(!headObjectRes.StorageClass); - const listObjectVersionsRes = data[9]; - const { DeleteMarkers, Versions } = listObjectVersionsRes; - assert.strictEqual(DeleteMarkers.length, 0); - assert.strictEqual(Versions.length, 1); + const listObjectVersionsRes = data[9]; + const { DeleteMarkers, Versions } = listObjectVersionsRes; + assert.strictEqual(DeleteMarkers.length, 0); + assert.strictEqual(Versions.length, 1); - const currentVersion = Versions[0]; - assert(currentVersion.IsLatest); - assertVersionHasNotBeenUpdated(currentVersion, 'null'); + const currentVersion = Versions[0]; + assert(currentVersion.IsLatest); + assertVersionHasNotBeenUpdated(currentVersion, 'null'); - return done(); + return done(); + }); }); - }); it('should update current null version if versioning suspended and put a version afterwards', done => { let objMD; @@ -1436,9 +1436,8 @@ describe('backbeat routes', () => { }); // TODO: CLDSRV-394 unskip routeBackbeat tests - describe.skip('backbeat PUT routes', () => { - describe('PUT data + metadata should create a new complete object', - () => { + describe('backbeat PUT routes', () => { + describe('PUT data + metadata should create a new complete object', () => { [{ caption: 'with ascii test key', key: testKey, encodedKey: testKey, @@ -1483,52 +1482,53 @@ describe('backbeat routes', () => { key, encodedKey: encodeURI(key), caption: `with key ${key}`, }))) - .forEach(testCase => { - it(testCase.caption, done => { - async.waterfall([next => { - const queryObj = testCase.legacyAPI ? {} : { v2: '' }; - makeBackbeatRequest({ - method: 'PUT', bucket: testCase.encryption ? 
- TEST_ENCRYPTED_BUCKET : TEST_BUCKET, - objectKey: testCase.encodedKey, - resourceType: 'data', - queryObj, - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - const newMd = getMetadataToPut(response); - if (testCase.encryption && !testCase.legacyAPI) { - assert.strictEqual(typeof newMd.location[0].cryptoScheme, 'number'); - assert.strictEqual(typeof newMd.location[0].cipheredDataKey, 'string'); - } else { - // if no encryption or legacy API, data should not be encrypted - assert.strictEqual(newMd.location[0].cryptoScheme, undefined); - assert.strictEqual(newMd.location[0].cipheredDataKey, undefined); - } - makeBackbeatRequest({ - method: 'PUT', bucket: testCase.encryption ? - TEST_ENCRYPTED_BUCKET : TEST_BUCKET, - objectKey: testCase.encodedKey, - resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - checkObjectData( - s3, testCase.encryption ? TEST_ENCRYPTED_BUCKET : TEST_BUCKET, - testCase.key, testData, next); - }], err => { - assert.ifError(err); - done(); + .forEach(testCase => { + it(testCase.caption, done => { + async.waterfall([next => { + const queryObj = testCase.legacyAPI ? {} : { v2: '' }; + makeBackbeatRequest({ + method: 'PUT', bucket: testCase.encryption ? 
+ TEST_ENCRYPTED_BUCKET : TEST_BUCKET, + objectKey: testCase.encodedKey, + resourceType: 'data', + queryObj, + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + const newMd = getMetadataToPut(response); + if (testCase.encryption && !testCase.legacyAPI) { + assert.strictEqual(typeof newMd.location[0].cryptoScheme, 'number'); + assert.strictEqual(typeof newMd.location[0].cipheredDataKey, 'string'); + } else { + // if no encryption or legacy API, data should not be encrypted + assert.strictEqual(newMd.location[0].cryptoScheme, undefined); + assert.strictEqual(newMd.location[0].cipheredDataKey, undefined); + } + makeBackbeatRequest({ + method: 'PUT', bucket: testCase.encryption ? + TEST_ENCRYPTED_BUCKET : TEST_BUCKET, + objectKey: testCase.encodedKey, + resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + checkObjectData( + s3, testCase.encryption ? 
TEST_ENCRYPTED_BUCKET : TEST_BUCKET, + testCase.key, testData, next); + }], err => { + assert.ifError(err); + done(); + }); }); }); - }); }); it('should PUT metadata for a non-versioned bucket', done => { @@ -1583,60 +1583,60 @@ describe('backbeat routes', () => { }); it('PUT metadata with "x-scal-replication-content: METADATA"' + - 'header should replicate metadata only', done => { - async.waterfall([next => { - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_ENCRYPTED_BUCKET, - objectKey: 'test-updatemd-key', - resourceType: 'data', - queryObj: { v2: '' }, - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData, - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - const newMd = getMetadataToPut(response); - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_ENCRYPTED_BUCKET, - objectKey: 'test-updatemd-key', - resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // Don't update the sent metadata since it is sent by - // backbeat as received from the replication queue, - // without updated data location or encryption info - // (since that info is not known by backbeat) - const newMd = Object.assign({}, testMd); - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_ENCRYPTED_BUCKET, - objectKey: 'test-updatemd-key', - resourceType: 'metadata', - headers: { 'x-scal-replication-content': 'METADATA' }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - checkObjectData(s3, TEST_ENCRYPTED_BUCKET, 'test-updatemd-key', - testData, next); - }], err => { - assert.ifError(err); - done(); + 'header should replicate metadata only', done => { + async.waterfall([next => { + 
makeBackbeatRequest({ + method: 'PUT', bucket: TEST_ENCRYPTED_BUCKET, + objectKey: 'test-updatemd-key', + resourceType: 'data', + queryObj: { v2: '' }, + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData, + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + const newMd = getMetadataToPut(response); + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_ENCRYPTED_BUCKET, + objectKey: 'test-updatemd-key', + resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // Don't update the sent metadata since it is sent by + // backbeat as received from the replication queue, + // without updated data location or encryption info + // (since that info is not known by backbeat) + const newMd = Object.assign({}, testMd); + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_ENCRYPTED_BUCKET, + objectKey: 'test-updatemd-key', + resourceType: 'metadata', + headers: { 'x-scal-replication-content': 'METADATA' }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + checkObjectData(s3, TEST_ENCRYPTED_BUCKET, 'test-updatemd-key', + testData, next); + }], err => { + assert.ifError(err); + done(); + }); }); - }); - it('should PUT tags for a non-versioned bucket', function test(done) { + itSkipCeph('should PUT tags for a non-versioned bucket', function test(done) { this.timeout(10000); const bucket = NONVERSIONED_BUCKET; const awsBucket = - config.locationConstraints[awsLocation].details.bucketName; + config.locationConstraints[awsLocation].details.bucketName; const awsKey = uuidv4(); async.waterfall([ next => @@ -1676,280 +1676,287 @@ describe('backbeat routes', () => { }); it('should refuse PUT 
data if no x-scal-canonical-id header ' + - 'is provided', done => makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, resourceType: 'data', - queryObj: { v2: '' }, - headers: { - 'content-length': testData.length, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData, - }, - err => { - assert.strictEqual(err.code, 'BadRequest'); - done(); - })); + 'is provided', done => makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, resourceType: 'data', + queryObj: { v2: '' }, + headers: { + 'content-length': testData.length, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData, + }, + err => { + assert.strictEqual(err.code, 'BadRequest'); + done(); + })); it('should refuse PUT in metadata-only mode if object does not exist', - done => { - async.waterfall([next => { - const newMd = Object.assign({}, testMd); - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: 'does-not-exist', - resourceType: 'metadata', - headers: { 'x-scal-replication-content': 'METADATA' }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }], err => { - assert.strictEqual(err.statusCode, 404); - done(); + done => { + async.waterfall([next => { + const newMd = Object.assign({}, testMd); + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: 'does-not-exist', + resourceType: 'metadata', + headers: { 'x-scal-replication-content': 'METADATA' }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, next); + }], err => { + assert.strictEqual(err.statusCode, 404); + done(); + }); }); - }); it('should remove old object data locations if version is overwritten ' + - 'with same contents', done => { - let oldLocation; - const testKeyOldData = `${testKey}-old-data`; - async.waterfall([next => { - // put object's data locations - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - 
objectKey: testKey, - resourceType: 'data', - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // put object metadata - const newMd = Object.assign({}, testMd); - newMd.location = JSON.parse(response.body); - oldLocation = newMd.location; - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), - }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // put another object which metadata reference the - // same data locations, we will attempt to retrieve - // this object at the end of the test to confirm that - // its locations have been deleted - const oldDataMd = Object.assign({}, testMd); - oldDataMd.location = oldLocation; - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKeyOldData, - resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(oldDataMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // create new data locations - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'data', - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // overwrite the original object version, now - // with references to the new data locations - const newMd = Object.assign({}, testMd); - newMd.location = JSON.parse(response.body); - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - 
objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), - }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // give some time for the async deletes to complete - setTimeout(() => checkObjectData(s3, TEST_BUCKET, testKey, testData, next), - 1000); - }, next => { - // check that the object copy referencing the old data - // locations is unreadable, confirming that the old - // data locations have been deleted - s3.getObject({ - Bucket: TEST_BUCKET, - Key: testKeyOldData, - }, err => { - assert(err, 'expected error to get object with old data ' + - 'locations, got success'); - next(); + 'with same contents', done => { + let oldLocation; + const testKeyOldData = `${testKey}-old-data`; + async.waterfall([next => { + // put object's data locations + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'data', + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // put object metadata + const newMd = Object.assign({}, testMd); + newMd.location = JSON.parse(response.body); + oldLocation = newMd.location; + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // put another object which metadata reference the + // same data locations, we will attempt to retrieve + // this object at the end of the test to confirm that + // its locations have been deleted + const 
oldDataMd = Object.assign({}, testMd); + oldDataMd.location = oldLocation; + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKeyOldData, + resourceType: 'metadata', + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(oldDataMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // create new data locations + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'data', + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // overwrite the original object version, now + // with references to the new data locations + const newMd = Object.assign({}, testMd); + newMd.location = JSON.parse(response.body); + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // give some time for the async deletes to complete + setTimeout(() => checkObjectData(s3, TEST_BUCKET, testKey, testData, next), + 1000); + }, next => { + // check that the object copy referencing the old data + // locations is unreadable, confirming that the old + // data locations have been deleted + s3.getObject({ + Bucket: TEST_BUCKET, + Key: testKeyOldData, + }, err => { + assert(err, 'expected error to get object with old data ' + + 'locations, got success'); + next(); + }); + }], err => { + assert.ifError(err); + done(); }); - }], err => { - assert.ifError(err); - done(); }); - }); - it('should remove old object data locations if version is overwritten ' + - 'with empty contents', done => { - 
let oldLocation; - const testKeyOldData = `${testKey}-old-data`; - async.waterfall([next => { - // put object's data locations - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'data', - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // put object metadata - const newMd = Object.assign({}, testMd); - newMd.location = JSON.parse(response.body); - oldLocation = newMd.location; - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), - }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // put another object which metadata reference the - // same data locations, we will attempt to retrieve - // this object at the end of the test to confirm that - // its locations have been deleted - const oldDataMd = Object.assign({}, testMd); - oldDataMd.location = oldLocation; - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKeyOldData, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), - }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(oldDataMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // overwrite the original object version with an empty location - const newMd = Object.assign({}, testMd); - newMd['content-length'] = 0; - newMd['content-md5'] = emptyContentsMd5; - newMd.location = null; - makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: 
versionIdUtils.encode(testMd.versionId), - }, - authCredentials: backbeatAuthCredentials, - requestBody: JSON.stringify(newMd), - }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - // give some time for the async deletes to complete - setTimeout(() => checkObjectData(s3, TEST_BUCKET, testKey, '', next), - 1000); - }, next => { - // check that the object copy referencing the old data - // locations is unreadable, confirming that the old - // data locations have been deleted - s3.getObject({ - Bucket: TEST_BUCKET, - Key: testKeyOldData, - }, err => { - assert(err, 'expected error to get object with old data ' + - 'locations, got success'); - next(); + // TODO: CLDSRV-394 unskip or delete this test + // The new data location is set to null when archiving to a Cold site. + // In that case "removing old data location key" is handled by the lifecycle + // transition processor. + it.skip('should remove old object data locations if version is overwritten ' + + 'with empty contents', done => { + let oldLocation; + const testKeyOldData = `${testKey}-old-data`; + async.waterfall([next => { + // put object's data locations + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'data', + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // put object metadata + const newMd = Object.assign({}, testMd); + newMd.location = JSON.parse(response.body); + oldLocation = newMd.location; + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, next); + }, (response, next) => { + 
assert.strictEqual(response.statusCode, 200); + // put another object which metadata reference the + // same data locations, we will attempt to retrieve + // this object at the end of the test to confirm that + // its locations have been deleted + const oldDataMd = Object.assign({}, testMd); + oldDataMd.location = oldLocation; + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKeyOldData, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(oldDataMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // overwrite the original object version with an empty location + const newMd = Object.assign({}, testMd); + newMd['content-length'] = 0; + newMd['content-md5'] = emptyContentsMd5; + newMd.location = null; + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: JSON.stringify(newMd), + }, next); + }, (response, next) => { + assert.strictEqual(response.statusCode, 200); + // give some time for the async deletes to complete + setTimeout(() => checkObjectData(s3, TEST_BUCKET, testKey, '', next), + 1000); + }, next => { + // check that the object copy referencing the old data + // locations is unreadable, confirming that the old + // data locations have been deleted + s3.getObject({ + Bucket: TEST_BUCKET, + Key: testKeyOldData, + }, err => { + assert(err, 'expected error to get object with old data ' + + 'locations, got success'); + next(); + }); + }], err => { + assert.ifError(err); + done(); }); - }], err => { - assert.ifError(err); - done(); }); - }); it('should not remove data locations on replayed metadata PUT', - done => { - let serializedNewMd; - async.waterfall([next => { - 
makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'data', - headers: { - 'content-length': testData.length, - 'x-scal-canonical-id': testArn, - }, - authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); - }, (response, next) => { - assert.strictEqual(response.statusCode, 200); - const newMd = Object.assign({}, testMd); - newMd.location = JSON.parse(response.body); - serializedNewMd = JSON.stringify(newMd); - async.timesSeries(2, (i, putDone) => makeBackbeatRequest({ - method: 'PUT', bucket: TEST_BUCKET, - objectKey: testKey, - resourceType: 'metadata', - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), - }, - authCredentials: backbeatAuthCredentials, - requestBody: serializedNewMd, - }, (err, response) => { - assert.ifError(err); + done => { + let serializedNewMd; + async.waterfall([next => { + makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'data', + headers: { + 'content-length': testData.length, + 'x-scal-canonical-id': testArn, + }, + authCredentials: backbeatAuthCredentials, + requestBody: testData + }, next); + }, (response, next) => { assert.strictEqual(response.statusCode, 200); - putDone(err); - }), () => next()); - }, next => { - // check that the object is still readable to make - // sure we did not remove the data keys - s3.getObject({ - Bucket: TEST_BUCKET, - Key: testKey, - }, (err, data) => { + const newMd = Object.assign({}, testMd); + newMd.location = JSON.parse(response.body); + serializedNewMd = JSON.stringify(newMd); + async.timesSeries(2, (i, putDone) => makeBackbeatRequest({ + method: 'PUT', bucket: TEST_BUCKET, + objectKey: testKey, + resourceType: 'metadata', + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + authCredentials: backbeatAuthCredentials, + requestBody: serializedNewMd, + }, (err, response) => { + assert.ifError(err); + assert.strictEqual(response.statusCode, 200); + 
putDone(err); + }), () => next()); + }, next => { + // check that the object is still readable to make + // sure we did not remove the data keys + s3.getObject({ + Bucket: TEST_BUCKET, + Key: testKey, + }, (err, data) => { + assert.ifError(err); + assert.strictEqual(data.Body.toString(), testData); + next(); + }); + }], err => { assert.ifError(err); - assert.strictEqual(data.Body.toString(), testData); - next(); + done(); }); - }], err => { - assert.ifError(err); - done(); }); - }); it('should create a new version when no versionId is passed in query string', done => { - let newVersion; async.waterfall([next => { // put object's data locations makeBackbeatRequest({ @@ -1961,7 +1968,8 @@ describe('backbeat routes', () => { 'x-scal-canonical-id': testArn, }, authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); + requestBody: testData + }, next); }, (response, next) => { assert.strictEqual(response.statusCode, 200); // put object metadata @@ -1991,7 +1999,8 @@ describe('backbeat routes', () => { 'x-scal-canonical-id': testArn, }, authCredentials: backbeatAuthCredentials, - requestBody: testData }, next); + requestBody: testData + }, next); }, (response, next) => { assert.strictEqual(response.statusCode, 200); // create a new version with the new data locations, @@ -2007,9 +2016,8 @@ describe('backbeat routes', () => { }, next); }, (response, next) => { assert.strictEqual(response.statusCode, 200); - const parsedResponse = JSON.parse(response.body); - newVersion = parsedResponse.versionId; - assert.notStrictEqual(newVersion, testMd.versionId); + // when no version id is provided, we return nothing + assert.strictEqual(response.body.length, 0); // give some time for the async deletes to complete, // then check that we can read the latest version setTimeout(() => s3.getObject({ @@ -2037,26 +2045,27 @@ describe('backbeat routes', () => { }); }); }); - describe.skip('backbeat authorization checks', () => { + + describe('backbeat authorization 
checks', () => { [{ method: 'PUT', resourceType: 'metadata' }, - { method: 'PUT', resourceType: 'data' }].forEach(test => { - const queryObj = test.resourceType === 'data' ? { v2: '' } : {}; - it(`${test.method} ${test.resourceType} should respond with ` + - '403 Forbidden if no credentials are provided', - done => { - makeBackbeatRequest({ - method: test.method, bucket: TEST_BUCKET, - objectKey: TEST_KEY, resourceType: test.resourceType, - queryObj, - }, - err => { - assert(err); - assert.strictEqual(err.statusCode, 403); - assert.strictEqual(err.code, 'AccessDenied'); - done(); - }); - }); - it(`${test.method} ${test.resourceType} should respond with ` + + { method: 'PUT', resourceType: 'data' }].forEach(test => { + const queryObj = test.resourceType === 'data' ? { v2: '' } : {}; + it(`${test.method} ${test.resourceType} should respond with ` + + '403 Forbidden if no credentials are provided', + done => { + makeBackbeatRequest({ + method: test.method, bucket: TEST_BUCKET, + objectKey: TEST_KEY, resourceType: test.resourceType, + queryObj, + }, + err => { + assert(err); + assert.strictEqual(err.statusCode, 403); + assert.strictEqual(err.code, 'AccessDenied'); + done(); + }); + }); + it(`${test.method} ${test.resourceType} should respond with ` + '403 Forbidden if wrong credentials are provided', done => { makeBackbeatRequest({ @@ -2068,14 +2077,14 @@ describe('backbeat routes', () => { secretKey: 'still wrong', }, }, - err => { - assert(err); - assert.strictEqual(err.statusCode, 403); - assert.strictEqual(err.code, 'InvalidAccessKeyId'); - done(); - }); + err => { + assert(err); + assert.strictEqual(err.statusCode, 403); + assert.strictEqual(err.code, 'InvalidAccessKeyId'); + done(); + }); }); - it(`${test.method} ${test.resourceType} should respond with ` + + it(`${test.method} ${test.resourceType} should respond with ` + '403 Forbidden if the account does not match the ' + 'backbeat user', done => { @@ -2088,14 +2097,14 @@ describe('backbeat routes', () => { 
secretKey: 'verySecretKey2', }, }, - err => { - assert(err); - assert.strictEqual(err.statusCode, 403); - assert.strictEqual(err.code, 'AccessDenied'); - done(); - }); + err => { + assert(err); + assert.strictEqual(err.statusCode, 403); + assert.strictEqual(err.code, 'AccessDenied'); + done(); + }); }); - it(`${test.method} ${test.resourceType} should respond with ` + + it(`${test.method} ${test.resourceType} should respond with ` + '403 Forbidden if backbeat user has wrong secret key', done => { makeBackbeatRequest({ @@ -2107,55 +2116,55 @@ describe('backbeat routes', () => { secretKey: 'hastalavista', }, }, - err => { - assert(err); - assert.strictEqual(err.statusCode, 403); - assert.strictEqual(err.code, 'SignatureDoesNotMatch'); - done(); - }); + err => { + assert(err); + assert.strictEqual(err.statusCode, 403); + assert.strictEqual(err.code, 'SignatureDoesNotMatch'); + done(); + }); }); - }); + }); it('GET /_/backbeat/api/... should respond with ' + - '503 on authenticated requests (API server down)', - done => { - const options = { - authCredentials: { - accessKey: 'accessKey2', - secretKey: 'verySecretKey2', - }, - hostname: ipAddress, - port: 8000, - method: 'GET', - path: '/_/backbeat/api/crr/failed', - jsonResponse: true, - }; - makeRequest(options, err => { - assert(err); - assert.strictEqual(err.statusCode, 503); - assert.strictEqual(err.code, 'ServiceUnavailable'); - done(); - }); - }); + '503 on authenticated requests (API server down)', + done => { + const options = { + authCredentials: { + accessKey: 'accessKey2', + secretKey: 'verySecretKey2', + }, + hostname: ipAddress, + port: 8000, + method: 'GET', + path: '/_/backbeat/api/crr/failed', + jsonResponse: true, + }; + makeRequest(options, err => { + assert(err); + assert.strictEqual(err.statusCode, 503); + assert.strictEqual(err.code, 'ServiceUnavailable'); + done(); + }); + }); it('GET /_/backbeat/api/... 
should respond with ' + - '403 Forbidden if the request is unauthenticated', - done => { - const options = { - hostname: ipAddress, - port: 8000, - method: 'GET', - path: '/_/backbeat/api/crr/failed', - jsonResponse: true, - }; - makeRequest(options, err => { - assert(err); - assert.strictEqual(err.statusCode, 403); - assert.strictEqual(err.code, 'AccessDenied'); - done(); - }); - }); + '403 Forbidden if the request is unauthenticated', + done => { + const options = { + hostname: ipAddress, + port: 8000, + method: 'GET', + path: '/_/backbeat/api/crr/failed', + jsonResponse: true, + }; + makeRequest(options, err => { + assert(err); + assert.strictEqual(err.statusCode, 403); + assert.strictEqual(err.code, 'AccessDenied'); + done(); + }); + }); }); - describe.skip('GET Metadata route', () => { + describe('GET Metadata route', () => { beforeEach(done => makeBackbeatRequest({ method: 'PUT', bucket: TEST_BUCKET, objectKey: TEST_KEY, @@ -2193,188 +2202,188 @@ describe('backbeat routes', () => { }, }, (err, data) => { assert.strictEqual(data.statusCode, 404); - assert.strictEqual(JSON.parse(data.body).code, 'NoSuchBucket'); - done(); - }); - }); - - it('should return error if object does not exist', done => { - makeBackbeatRequest({ - method: 'GET', bucket: TEST_BUCKET, - objectKey: 'blah', resourceType: 'metadata', - authCredentials: backbeatAuthCredentials, - queryObj: { - versionId: versionIdUtils.encode(testMd.versionId), - }, - }, (err, data) => { - assert.strictEqual(data.statusCode, 404); - assert.strictEqual(JSON.parse(data.body).code, 'ObjNotFound'); - done(); - }); - }); - }); - describe.skip('backbeat multipart upload operations', function test() { - this.timeout(10000); - - // The ceph image does not support putting tags during initiate MPU. 
- itSkipCeph('should put tags if the source is AWS and tags are ' + - 'provided when initiating the multipart upload', done => { - const awsBucket = - config.locationConstraints[awsLocation].details.bucketName; - const awsKey = uuidv4(); - const multipleBackendPath = - `/_/backbeat/multiplebackenddata/${awsBucket}/${awsKey}`; - let uploadId; - let partData; - async.series([ - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: multipleBackendPath, - queryObj: { operation: 'initiatempu' }, - headers: { - 'x-scal-storage-class': awsLocation, - 'x-scal-storage-type': 'aws_s3', - 'x-scal-tags': JSON.stringify({ 'key1': 'value1' }), - }, - jsonResponse: true, - }, (err, data) => { - if (err) { - return next(err); - } - uploadId = JSON.parse(data.body).uploadId; - return next(); - }), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'PUT', - path: multipleBackendPath, - queryObj: { operation: 'putpart' }, - headers: { - 'x-scal-storage-class': awsLocation, - 'x-scal-storage-type': 'aws_s3', - 'x-scal-upload-id': uploadId, - 'x-scal-part-number': '1', - 'content-length': testData.length, - }, - requestBody: testData, - jsonResponse: true, - }, (err, data) => { - if (err) { - return next(err); - } - const body = JSON.parse(data.body); - partData = [{ - PartNumber: [body.partNumber], - ETag: [body.ETag], - }]; - return next(); - }), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: multipleBackendPath, - queryObj: { operation: 'completempu' }, - headers: { - 'x-scal-storage-class': awsLocation, - 'x-scal-storage-type': 'aws_s3', - 'x-scal-upload-id': uploadId, - }, - requestBody: JSON.stringify(partData), - jsonResponse: true, - }, next), - next => - awsClient.getObjectTagging({ - Bucket: awsBucket, - Key: awsKey, - }, (err, data) => { - 
assert.ifError(err); - assert.deepStrictEqual(data.TagSet, [{ - Key: 'key1', - Value: 'value1', - }]); - next(); - }), - ], done); + assert.strictEqual(JSON.parse(data.body).code, 'NoSuchBucket'); + done(); + }); }); - it('should put tags if the source is Azure and tags are provided ' + - 'when completing the multipart upload', done => { - const containerName = getAzureContainerName(azureLocation); - const blob = uuidv4(); - const multipleBackendPath = - `/_/backbeat/multiplebackenddata/${containerName}/${blob}`; - const uploadId = uuidv4().replace(/-/g, ''); - let partData; - async.series([ - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'PUT', - path: multipleBackendPath, - queryObj: { operation: 'putpart' }, - headers: { - 'x-scal-storage-class': azureLocation, - 'x-scal-storage-type': 'azure', - 'x-scal-upload-id': uploadId, - 'x-scal-part-number': '1', - 'content-length': testData.length, - }, - requestBody: testData, - jsonResponse: true, - }, (err, data) => { - if (err) { - return next(err); - } - const body = JSON.parse(data.body); - partData = [{ - PartNumber: [body.partNumber], - ETag: [body.ETag], - NumberSubParts: [body.numberSubParts], - }]; - return next(); - }), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: multipleBackendPath, - queryObj: { operation: 'completempu' }, - headers: { - 'x-scal-storage-class': azureLocation, - 'x-scal-storage-type': 'azure', - 'x-scal-upload-id': uploadId, - 'x-scal-tags': JSON.stringify({ 'key1': 'value1' }), - }, - requestBody: JSON.stringify(partData), - jsonResponse: true, - }, next), - next => - azureClient.getBlobProperties( - containerName, blob, (err, result) => { + + it('should return error if object does not exist', done => { + makeBackbeatRequest({ + method: 'GET', bucket: TEST_BUCKET, + objectKey: 'blah', resourceType: 'metadata', + authCredentials: 
backbeatAuthCredentials, + queryObj: { + versionId: versionIdUtils.encode(testMd.versionId), + }, + }, (err, data) => { + assert.strictEqual(data.statusCode, 404); + assert.strictEqual(JSON.parse(data.body).code, 'ObjNotFound'); + done(); + }); + }); + }); + + describe('backbeat multipart upload operations', function test() { + this.timeout(10000); + + // The ceph image does not support putting tags during initiate MPU. + itSkipCeph('should put tags if the source is AWS and tags are ' + + 'provided when initiating the multipart upload', done => { + const awsBucket = + config.locationConstraints[awsLocation].details.bucketName; + const awsKey = uuidv4(); + const multipleBackendPath = + `/_/backbeat/multiplebackenddata/${awsBucket}/${awsKey}`; + let uploadId; + let partData; + async.series([ + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: multipleBackendPath, + queryObj: { operation: 'initiatempu' }, + headers: { + 'x-scal-storage-class': awsLocation, + 'x-scal-storage-type': 'aws_s3', + 'x-scal-tags': JSON.stringify({ 'key1': 'value1' }), + }, + jsonResponse: true, + }, (err, data) => { if (err) { return next(err); } - const tags = JSON.parse(result.metadata.tags); - assert.deepStrictEqual(tags, { key1: 'value1' }); + uploadId = JSON.parse(data.body).uploadId; return next(); }), - ], done); - }); + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'PUT', + path: multipleBackendPath, + queryObj: { operation: 'putpart' }, + headers: { + 'x-scal-storage-class': awsLocation, + 'x-scal-storage-type': 'aws_s3', + 'x-scal-upload-id': uploadId, + 'x-scal-part-number': '1', + 'content-length': testData.length, + }, + requestBody: testData, + jsonResponse: true, + }, (err, data) => { + if (err) { + return next(err); + } + const body = JSON.parse(data.body); + partData = [{ + PartNumber: [body.partNumber], + ETag: [body.ETag], + 
}]; + return next(); + }), + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: multipleBackendPath, + queryObj: { operation: 'completempu' }, + headers: { + 'x-scal-storage-class': awsLocation, + 'x-scal-storage-type': 'aws_s3', + 'x-scal-upload-id': uploadId, + }, + requestBody: JSON.stringify(partData), + jsonResponse: true, + }, next), + next => + awsClient.getObjectTagging({ + Bucket: awsBucket, + Key: awsKey, + }, (err, data) => { + assert.ifError(err); + assert.deepStrictEqual(data.TagSet, [{ + Key: 'key1', + Value: 'value1', + }]); + next(); + }), + ], done); + }); + + it.skip('should put tags if the source is Azure and tags are provided ' + + 'when completing the multipart upload', done => { + const containerName = getAzureContainerName(azureLocation); + const blob = uuidv4(); + const multipleBackendPath = + `/_/backbeat/multiplebackenddata/${containerName}/${blob}`; + const uploadId = uuidv4().replace(/-/g, ''); + let partData; + async.series([ + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'PUT', + path: multipleBackendPath, + queryObj: { operation: 'putpart' }, + headers: { + 'x-scal-storage-class': azureLocation, + 'x-scal-storage-type': 'azure', + 'x-scal-upload-id': uploadId, + 'x-scal-part-number': '1', + 'content-length': testData.length, + }, + requestBody: testData, + jsonResponse: true, + }, (err, data) => { + if (err) { + return next(err); + } + const body = JSON.parse(data.body); + partData = [{ + PartNumber: [body.partNumber], + ETag: [body.ETag], + NumberSubParts: [body.numberSubParts], + }]; + return next(); + }), + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: multipleBackendPath, + queryObj: { operation: 'completempu' }, + headers: { + 'x-scal-storage-class': azureLocation, + 'x-scal-storage-type': 'azure', 
+ 'x-scal-upload-id': uploadId, + 'x-scal-tags': JSON.stringify({ 'key1': 'value1' }), + }, + requestBody: JSON.stringify(partData), + jsonResponse: true, + }, next), + next => + azureClient.getContainerClient(containerName).getBlobClient(blob) + .getProperties().then(res => { + const tags = JSON.parse(res.metadata.tags); + assert.deepStrictEqual(tags, { key1: 'value1' }); + return next(); + }, assert.ifError), + ], done); + }); }); - describe.skip('Batch Delete Route', function test() { + + describe('Batch Delete Route', function test() { this.timeout(30000); it('should batch delete a local location', done => { let versionId; @@ -2417,7 +2426,7 @@ describe('backbeat routes', () => { method: 'POST', path: '/_/backbeat/batchdelete', requestBody: - `{"Locations":${JSON.stringify(location)}}`, + `{"Locations":${JSON.stringify(location)}}`, jsonResponse: true, }; makeRequest(options, done); @@ -2432,7 +2441,8 @@ describe('backbeat routes', () => { }), ], done); }); - it('should batch delete a versioned AWS location', done => { + + itSkipCeph('should batch delete a versioned AWS location', done => { let versionId; const awsKey = `${TEST_BUCKET}/batch-delete-test-key-${makeid(8)}`; @@ -2459,7 +2469,7 @@ describe('backbeat routes', () => { hostname: ipAddress, port: 8000, method: 'POST', - path: '/_/backbeat/batchdelete', + path: `/_/backbeat/batchdelete/${awsBucket}/${awsKey}`, requestBody: reqBody, jsonResponse: true, }; @@ -2504,7 +2514,7 @@ describe('backbeat routes', () => { method: 'POST', path: '/_/backbeat/batchdelete', requestBody: - '{"Locations":' + + '{"Locations":' + '[{"key":"abcdef","dataStoreName":"us-east-1"}]}', jsonResponse: true, }; @@ -2530,259 +2540,269 @@ describe('backbeat routes', () => { }); it('should not put delete tags if the source is not Azure and ' + - 'if-unmodified-since header is not provided', done => { - const awsKey = uuidv4(); - async.series([ - next => - awsClient.putObject({ - Bucket: awsBucket, - Key: awsKey, - }, next), - next => 
- makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: '/_/backbeat/batchdelete', - headers: { - 'x-scal-storage-class': awsLocation, - 'x-scal-tags': JSON.stringify({ - 'scal-delete-marker': 'true', - 'scal-delete-service': 'lifecycle-transition', + 'if-unmodified-since header is not provided', done => { + const awsKey = uuidv4(); + async.series([ + next => + awsClient.putObject({ + Bucket: awsBucket, + Key: awsKey, + }, next), + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: '/_/backbeat/batchdelete', + headers: { + 'x-scal-storage-class': awsLocation, + 'x-scal-tags': JSON.stringify({ + 'scal-delete-marker': 'true', + 'scal-delete-service': 'lifecycle-transition', + }), + }, + requestBody: JSON.stringify({ + Locations: [{ + key: awsKey, + dataStoreName: awsLocation, + }], }), - }, - requestBody: JSON.stringify({ - Locations: [{ - key: awsKey, - dataStoreName: awsLocation, - }], + jsonResponse: true, + }, next), + next => + awsClient.getObjectTagging({ + Bucket: awsBucket, + Key: awsKey, + }, (err, data) => { + assert.ifError(err); + assert.deepStrictEqual(data.TagSet, []); + next(); }), - jsonResponse: true, - }, next), - next => - awsClient.getObjectTagging({ - Bucket: awsBucket, - Key: awsKey, - }, (err, data) => { - assert.ifError(err); - assert.deepStrictEqual(data.TagSet, []); - next(); - }), - ], done); - }); + ], done); + }); - it('should not put tags if the source is not Azure and ' + - 'if-unmodified-since condition is not met', done => { - const awsKey = uuidv4(); - async.series([ - next => - awsClient.putObject({ - Bucket: awsBucket, - Key: awsKey, - }, next), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: '/_/backbeat/batchdelete', - headers: { - 'if-unmodified-since': - 'Sun, 31 Mar 2019 00:00:00 GMT', - 
'x-scal-storage-class': awsLocation, - 'x-scal-tags': JSON.stringify({ - 'scal-delete-marker': 'true', - 'scal-delete-service': 'lifecycle-transition', + itSkipCeph('should not put tags if the source is not Azure and ' + + 'if-unmodified-since condition is not met', done => { + const awsKey = uuidv4(); + async.series([ + next => + awsClient.putObject({ + Bucket: awsBucket, + Key: awsKey, + }, next), + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: `/_/backbeat/batchdelete/${awsBucket}/${awsKey}`, + headers: { + 'if-unmodified-since': + new Date(Date.now() + 86400000).toUTCString(), + 'x-scal-storage-class': awsLocation, + 'x-scal-tags': JSON.stringify({ + 'scal-delete-marker': 'true', + 'scal-delete-service': 'lifecycle-transition', + }), + }, + requestBody: JSON.stringify({ + Locations: [{ + key: awsKey, + dataStoreName: awsLocation, + }], }), - }, - requestBody: JSON.stringify({ - Locations: [{ - key: awsKey, - dataStoreName: awsLocation, - }], + jsonResponse: true, + }, next), + next => + awsClient.getObjectTagging({ + Bucket: awsBucket, + Key: awsKey, + }, (err, data) => { + if (err) { + return next(err); + } + if (data.TagSet.length !== 2) { + return next(new Error(`Expected 2 tags, got ${JSON.stringify(data)}`)); + } + return next(); }), - jsonResponse: true, - }, next), - next => - awsClient.getObjectTagging({ - Bucket: awsBucket, - Key: awsKey, - }, (err, data) => { - assert.ifError(err); - assert.deepStrictEqual(data.TagSet, []); - next(); - }), - ], done); - }); + ], done); + }); - it('should put tags if the source is not Azure and ' + - 'if-unmodified-since condition is met', done => { - const awsKey = uuidv4(); - let lastModified; - async.series([ - next => - awsClient.putObject({ - Bucket: awsBucket, - Key: awsKey, - }, next), - next => - awsClient.headObject({ - Bucket: awsBucket, - Key: awsKey, - }, (err, data) => { - if (err) { - return next(err); - } - lastModified 
= data.LastModified; - return next(); - }), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: `/_/backbeat/batchdelete/${awsBucket}/${awsKey}`, - headers: { - 'if-unmodified-since': lastModified, - 'x-scal-storage-class': awsLocation, - 'x-scal-tags': JSON.stringify({ - 'scal-delete-marker': 'true', - 'scal-delete-service': 'lifecycle-transition', - }), - }, - requestBody: JSON.stringify({ - Locations: [{ - key: awsKey, - dataStoreName: awsLocation, - }], + itSkipCeph('should put tags if the source is not Azure and ' + + 'if-unmodified-since condition is met', done => { + const awsKey = uuidv4(); + let lastModified; + async.series([ + next => + awsClient.putObject({ + Bucket: awsBucket, + Key: awsKey, + }, next), + next => + awsClient.headObject({ + Bucket: awsBucket, + Key: awsKey, + }, (err, data) => { + if (err) { + return next(err); + } + lastModified = data.LastModified; + return next(); }), - jsonResponse: true, - }, next), - next => - awsClient.getObjectTagging({ - Bucket: awsBucket, - Key: awsKey, - }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.TagSet.length, 2); - data.TagSet.forEach(tag => { - const { Key, Value } = tag; - const isValidTag = - Key === 'scal-delete-marker' || - Key === 'scal-delete-service'; - assert(isValidTag); - if (Key === 'scal-delete-marker') { - assert.strictEqual(Value, 'true'); + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: `/_/backbeat/batchdelete/${awsBucket}/${awsKey}`, + headers: { + 'if-unmodified-since': lastModified, + 'x-scal-storage-class': awsLocation, + 'x-scal-tags': JSON.stringify({ + 'scal-delete-marker': 'true', + 'scal-delete-service': 'lifecycle-transition', + }), + }, + requestBody: JSON.stringify({ + Locations: [{ + key: awsKey, + dataStoreName: awsLocation, + }], + }), + jsonResponse: true, + }, next), + next => + 
awsClient.getObjectTagging({ + Bucket: awsBucket, + Key: awsKey, + }, (err, data) => { + if (err) { + return next(err); } - if (Key === 'scal-delete-service') { - assert.strictEqual( - Value, 'lifecycle-transition'); + if (data.TagSet.length !== 2) { + return next(new Error(`Expected 2 tags, got ${data.TagSet}`)); } - }); - next(); - }), - ], done); - }); + const errors = []; + data.TagSet.forEach(tag => { + const { Key, Value } = tag; + const isValidTag = + Key === 'scal-delete-marker' || + Key === 'scal-delete-service'; + if (!isValidTag) { + errors.push(`Invalid tag: ${Key}`); + } + if (Key === 'scal-delete-marker' && Value !== 'true') { + errors.push(`Invalid tag scal-delete-marker value: ${Value}`); + } + if (Key === 'scal-delete-service' && Value !== 'lifecycle-transition') { + errors.push(`Invalid tag scal-delete-service value: ${Value}`); + } + }); + if (errors.length === 0) { + return next(); + } + return next(new Error(errors.join(', '))); + }), + ], done); + }); - it('should not delete the object if the source is Azure and ' + - 'if-unmodified-since condition is not met', done => { - const blob = uuidv4(); - async.series([ - next => - azureClient.createBlockBlobFromText( - containerName, blob, 'a', null, next), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: - `/_/backbeat/batchdelete/${containerName}/${blob}`, - headers: { - 'if-unmodified-since': - 'Sun, 31 Mar 2019 00:00:00 GMT', - 'x-scal-storage-class': azureLocation, - 'x-scal-tags': JSON.stringify({ - 'scal-delete-marker': 'true', - 'scal-delete-service': 'lifecycle-transition', + it.skip('should not delete the object if the source is Azure and ' + + 'if-unmodified-since condition is not met', done => { + const blob = uuidv4(); + async.series([ + next => + azureClient.getContainerClient(containerName).getBlockBlobClient(blob) + .upload('a', 1, next), + next => + makeRequest({ + authCredentials: 
backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: + `/_/backbeat/batchdelete/${containerName}/${blob}`, + headers: { + 'if-unmodified-since': + 'Sun, 31 Mar 2019 00:00:00 GMT', + 'x-scal-storage-class': azureLocation, + 'x-scal-tags': JSON.stringify({ + 'scal-delete-marker': 'true', + 'scal-delete-service': 'lifecycle-transition', + }), + }, + requestBody: JSON.stringify({ + Locations: [{ + key: blob, + dataStoreName: azureLocation, + }], }), - }, - requestBody: JSON.stringify({ - Locations: [{ - key: blob, - dataStoreName: azureLocation, - }], - }), - jsonResponse: true, - }, err => { - if (err && err.statusCode === 412) { - return next(); - } - return next(err); - }), - next => - azureClient.getBlobProperties( - containerName, blob, (err, result) => { - if (err) { - return next(err); + jsonResponse: true, + }, err => { + if (err && err.statusCode === 412) { + return next(); } + return next(err); + }), + next => + azureClient.getContainerClient(containerName).getProperties(blob).then(result => { assert(result); return next(); + }, err => { + next(new Error(`Error from Azure: ${err}`)); }), - ], done); - }); + ], done); + }); - it('should delete the object if the source is Azure and ' + - 'if-unmodified-since condition is met', done => { - const blob = uuidv4(); - let lastModified; - async.series([ - next => - azureClient.createBlockBlobFromText( - containerName, blob, 'a', null, next), - next => - azureClient.getBlobProperties( - containerName, blob, (err, result) => { - if (err) { - return next(err); - } + it.skip('should delete the object if the source is Azure and ' + + 'if-unmodified-since condition is met', done => { + const blob = uuidv4(); + let lastModified; + async.series([ + next => + azureClient.getContainerClient(containerName).getBlockBlobClient(blob) + .upload('a', 1, next), + next => + azureClient.getContainerClient(containerName).getProperties(blob).then(result => { lastModified = result.lastModified; return 
next(); + }, err => { + next(new Error(`Error from Azure: ${err}`)); }), - next => - makeRequest({ - authCredentials: backbeatAuthCredentials, - hostname: ipAddress, - port: 8000, - method: 'POST', - path: - `/_/backbeat/batchdelete/${containerName}/${blob}`, - headers: { - 'if-unmodified-since': lastModified, - 'x-scal-storage-class': azureLocation, - 'x-scal-tags': JSON.stringify({ - 'scal-delete-marker': 'true', - 'scal-delete-service': 'lifecycle-transition', + next => + makeRequest({ + authCredentials: backbeatAuthCredentials, + hostname: ipAddress, + port: 8000, + method: 'POST', + path: + `/_/backbeat/batchdelete/${containerName}/${blob}`, + headers: { + 'if-unmodified-since': lastModified, + 'x-scal-storage-class': azureLocation, + 'x-scal-tags': JSON.stringify({ + 'scal-delete-marker': 'true', + 'scal-delete-service': 'lifecycle-transition', + }), + }, + requestBody: JSON.stringify({ + Locations: [{ + key: blob, + dataStoreName: azureLocation, + }], }), - }, - requestBody: JSON.stringify({ - Locations: [{ - key: blob, - dataStoreName: azureLocation, - }], + jsonResponse: true, + }, next), + next => + azureClient.getContainerClient(containerName).getProperties(blob).then(() => { + next(new Error('Azure should return 404')); + }, err => { + next(err.statusCode === 404 ? 
null : err); }), - jsonResponse: true, - }, next), - next => - azureClient.getBlobProperties(containerName, blob, err => { - assert(err.statusCode === 404); - return next(); - }), - ], done); - }); + ], done); + }); }); }); diff --git a/tests/unit/DummyRequest.js b/tests/unit/DummyRequest.js index 28b21337eb..345927db83 100644 --- a/tests/unit/DummyRequest.js +++ b/tests/unit/DummyRequest.js @@ -25,6 +25,14 @@ class DummyRequest extends http.IncomingMessage { this.push(msg); } this.push(null); + if (!this.socket) { + this.socket = { + remoteAddress: '127.0.0.1', + destroy: () => {}, + on: () => {}, + removeListener: () => {}, + }; + } } } diff --git a/tests/unit/routeBackbeat.js b/tests/unit/routeBackbeat.js new file mode 100644 index 0000000000..8dc881f258 --- /dev/null +++ b/tests/unit/routeBackbeat.js @@ -0,0 +1,321 @@ +const sinon = require('sinon'); +const async = require('async'); +const assert = require('assert'); +const DummyRequest = require('./DummyRequest'); +const { routeBackbeat, backbeatRoutes } = require('../../lib/routes/routeBackbeat'); +const { bucketPut } = require('../../lib/api/bucketPut'); +const { makeAuthInfo, versioningTestUtils, DummyRequestLogger } = require('./helpers'); +const objectPut = require('../../lib/api/objectPut'); +const { auth, errors } = require('arsenal'); +const { default: AuthInfo } = require('arsenal/build/lib/auth/AuthInfo'); +const bucketPutVersioning = require('../../lib/api/bucketPutVersioning'); + +const log = new DummyRequestLogger(); +const bucketName = 'bucketname'; +const canonicalID = 'accessKey1'; +const authInfo = makeAuthInfo(canonicalID); +const namespace = 'default'; +const objectName = 'objectName'; +const postBody = Buffer.from('I am a body', 'utf8'); + +const testBucket = { + bucketName, + namespace, + headers: { + 'host': `${bucketName}.s3.amazonaws.com`, + }, + url: `/${bucketName}`, + actionImplicitDenies: false, +}; + +const testObject = new DummyRequest({ + bucketName, + namespace, + objectKey: 
objectName, + headers: { + 'x-amz-meta-test': 'some metadata', + 'content-length': '12', + }, + parsedContentLength: 12, + url: `/${bucketName}/${objectName}`, +}, postBody); + +describe('routeBackbeat', () => { + let request; + let response; + + beforeEach(() => { + sinon.stub(backbeatRoutes, 'PUT').returns({ + data: sinon.stub(), + metadata: sinon.stub(), + multiplebackenddata: { + putobject: sinon.stub(), + putpart: sinon.stub(), + }, + }); + + sinon.stub(backbeatRoutes, 'POST').returns({ + multiplebackenddata: { + initiatempu: sinon.stub(), + completempu: sinon.stub(), + puttagging: sinon.stub(), + }, + batchdelete: sinon.stub(), + index: { + add: sinon.stub(), + delete: sinon.stub(), + }, + }); + + sinon.stub(backbeatRoutes, 'DELETE').returns({ + expiration: sinon.stub(), + multiplebackenddata: { + deleteobject: sinon.stub(), + deleteobjecttagging: sinon.stub(), + abortmpu: sinon.stub(), + }, + }); + + sinon.stub(backbeatRoutes, 'GET').returns({ + metadata: sinon.stub(), + multiplebackendmetadata: sinon.stub(), + lifecycle: sinon.stub(), + index: sinon.stub(), + }); + + request = new DummyRequest( + { + method: 'GET', + headers: { 'content-length': '123' }, + url: '/_/backbeat/multiplebackendmetadata/bucketName/objectKey?operation=putobject', + }, + 'body' + ); + response = { + setHeader: sinon.stub(), + writeHead: sinon.stub(), + end: sinon.stub().callsFake((body, format, cb) => cb()), + }; + }); + + afterEach(() => { + sinon.restore(); + }); + + it('should reject if the request is invalid', done => { + request.url = '/_/backbeat//bucketName/objectKey?operation=putobject'; + // Cover the case invalidRequest === true + routeBackbeat('127.0.0.1', request, response, log, err => { + assert(err.is.MethodNotAllowed); + done(); + }); + }); + + it('should reject if the route is invalid', done => { + request.url = '/_/backbeat/wrong/bucketName/objectKey?operation=putobject'; + // Cover the case invalidRoute === true + routeBackbeat('127.0.0.1', request, response, log, 
err => { + assert(err.is.MethodNotAllowed); + done(); + }); + }); + + [ + { + method: 'PUT', + resourceType: 'metadata', + target: `${bucketName}/${objectName}`, + operation: null, + versionId: false, + expect: errors.MalformedPOSTRequest, + }, + { + method: 'GET', + resourceType: 'metadata', + target: `${bucketName}/${objectName}`, + operation: null, + versionId: true, + }, + { + method: 'PUT', + resourceType: 'data', + target: `${bucketName}/${objectName}`, + operation: null, + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'PUT', + resourceType: 'multiplebackenddata', + target: `${bucketName}/${objectName}`, + operation: 'putobject', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'PUT', + resourceType: 'multiplebackenddata', + target: `${bucketName}/${objectName}`, + operation: 'putpart', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'DELETE', + resourceType: 'multiplebackenddata', + target: `${bucketName}/${objectName}`, + operation: 'deleteobject', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'DELETE', + resourceType: 'multiplebackenddata', + target: `${bucketName}/${objectName}`, + operation: 'abortmpu', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'DELETE', + resourceType: 'multiplebackenddata', + target: `${bucketName}/${objectName}`, + operation: 'deleteobjecttagging', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'POST', + resourceType: 'multiplebackenddata', + target: `${bucketName}/${objectName}`, + operation: 'initiatempu', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'POST', + resourceType: 'multiplebackenddata', + target: `${bucketName}/${objectName}`, + operation: 'completempu', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'POST', + resourceType: 'multiplebackenddata', + target: `${bucketName}/${objectName}`, + operation: 'puttagging', + versionId: false, + expect: 
errors.BadRequest, + }, + { + method: 'GET', + resourceType: 'multiplebackendmetadata', + target: `${bucketName}/${objectName}`, + operation: null, + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'POST', + resourceType: 'batchdelete', + target: null, + operation: null, + versionId: false, + expect: errors.MalformedPOSTRequest, + }, + { + method: 'GET', + resourceType: 'lifecycle', + target: `${bucketName}?list-type=wrong`, + operation: null, + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'POST', + resourceType: 'index', + target: bucketName, + operation: 'add', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'POST', + resourceType: 'index', + target: bucketName, + operation: 'delete', + versionId: false, + expect: errors.BadRequest, + }, + { + method: 'GET', + resourceType: 'index', + target: null, + operation: 'delete', + versionId: false, + expect: errors.NotImplemented, + } + ].forEach(testCase => { + it(`should call method ${testCase.method} ${testCase.resourceType}`, done => { + let hasQuery = false; + let versionIdParsed = null; + request.method = testCase.method; + request.url = `/_/backbeat/${testCase.resourceType}/${testCase.target}`; + if (testCase.operation) { + request.url += `?operation=${testCase.operation}`; + hasQuery = true; + } + + // Mock auth server to ignore auth in this test + sinon.stub(auth.server, 'doAuth').callsFake((req, log, cb) => + cb(null, new AuthInfo({ + canonicalID: 'abcdef/lifecycle', + accountDisplayName: 'Lifecycle Service Account', + }), undefined, undefined, { + accountQuota: 1000, + }) + ); + + const enableVersioningRequest = + versioningTestUtils.createBucketPutVersioningReq(bucketName, 'Enabled'); + + return async.series([ + next => bucketPut(authInfo, testBucket, log, next), + next => bucketPutVersioning(authInfo, enableVersioningRequest, log, next), + next => objectPut(authInfo, testObject, undefined, log, (err, res) => { + versionIdParsed = 
res['x-amz-version-id']; + if (testCase.versionId) { + request.url += `${(hasQuery ? '&' : '?')}&versionId=${versionIdParsed}`; + } + next(err); + }), + next => routeBackbeat('127.0.0.1', request, response, log, next), + ], err => { + if (testCase.expect) { + assert.strictEqual(err.code, testCase.expect.code); + return done(); + } + assert.ifError(err); + assert.strictEqual(Array.isArray(request.finalizerHooks), true); + assert.strictEqual(request.apiMethods[0], 'objectReplicate'); + assert.strictEqual(request.apiMethods.length, 1); + assert.strictEqual(request.accountQuotas, 1000); + return done(); + }); + }); + }); + + // Although the authz result is by default an implicit deny, the + // ACL should prevent any further processing for non-service or + // non-account identities. + it('should return access denied if doAuth returns an error', done => { + request.method = 'PUT'; + request.url = `/_/backbeat/metadata/${bucketName}/${objectName}?operation=putobject`; + + routeBackbeat('127.0.0.1', request, response, log, err => { + assert(err.is.AccessDenied); + done(); + }); + }); +});