From b2ce22412b7220ae0b96111f1b2ef2a9bab35116 Mon Sep 17 00:00:00 2001
From: shirady <57721533+shirady@users.noreply.github.com>
Date: Thu, 5 Dec 2024 10:00:08 +0200
Subject: [PATCH] NC | NSFS | Add stat to account_cache

1. In account_cache, move the value we had in expiry_ms to config.js (keeping its
   "TODO" comment), and add a _validate_account function (similar to the
   _validate_bucket_namespace method we have for bucket_namespace_cache).
2. In read_account_by_access_key, add the stat property so we can save the stat of
   the account config file.
3. Rename check_same_stat to check_same_stat_bucket so we have two methods, one for
   buckets and one for accounts. Also rename ns_allow_stat_bucket to
   bs_allow_stat_bucket (since it is implemented in the bucketspace, not in the
   namespace).
4. Add stat_account_config_file to config_fs.

Signed-off-by: shirady <57721533+shirady@users.noreply.github.com>
---
 config.js                                          |  5 ++
 src/sdk/bucketspace_fs.js                          | 36 ++++++++-
 src/sdk/config_fs.js                               | 20 +++++
 src/sdk/nb.d.ts                                    |  3 +-
 src/sdk/object_sdk.js                              | 31 ++++++--
 src/test/unit_tests/nc_coretest.js                 |  2 +-
 .../test_nc_with_a_couple_of_forks.js              | 77 ++++++++++++++++++-
 7 files changed, 161 insertions(+), 13 deletions(-)

diff --git a/config.js b/config.js
index 20fb4a7c7c..dbea63a3ec 100644
--- a/config.js
+++ b/config.js
@@ -637,9 +637,14 @@ config.INLINE_MAX_SIZE = 4096;
 
 // Object SDK bucket cache expiration time
 config.OBJECT_SDK_BUCKET_CACHE_EXPIRY_MS = 60000;
 
+// Object SDK account cache expiration time
+config.OBJECT_SDK_ACCOUNT_CACHE_EXPIRY_MS = Number(process.env.ACCOUNTS_CACHE_EXPIRY) || 10 * 60 * 1000; // TODO: Decide on a time that we want to invalidate
+
 // Object SDK bucket_namespace_cache allow stat of the config file
 config.NC_ENABLE_BUCKET_NS_CACHE_STAT_VALIDATION = true;
 
+// Object SDK account_cache allow stat of the config file
+config.NC_ENABLE_ACCOUNT_CACHE_STAT_VALIDATION = true;
+
 //////////////////////////////
 //      OPERATOR RELATED    //

diff --git a/src/sdk/bucketspace_fs.js b/src/sdk/bucketspace_fs.js
index 6f07c5ed7c..b41cf681e7 100644
--- a/src/sdk/bucketspace_fs.js
+++ b/src/sdk/bucketspace_fs.js
@@ -66,6 +66,7 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
         return fs_context;
     }
 
+    // TODO: account function should be handled in accountspace_fs
     async read_account_by_access_key({ access_key }) {
         try {
             if (!access_key) throw new Error('no access key');
@@ -85,6 +86,12 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
             if (account.nsfs_account_config.distinguished_name) {
                 account.nsfs_account_config.distinguished_name = new SensitiveString(account.nsfs_account_config.distinguished_name);
             }
+            try {
+                account.stat = await this.config_fs.stat_account_config_file(access_key);
+            } catch (err) {
+                dbg.warn(`BucketSpaceFS.read_account_by_access_key could not stat_account_config_file` +
+                    ` of account id: ${account._id} account name: ${account.name.unwrap()}`);
+            }
             return account;
         } catch (err) {
             dbg.error('BucketSpaceFS.read_account_by_access_key: failed with error', err);
@@ -186,18 +193,39 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
     }
 
     /**
-     * check_same_stat will return true the config file was not changed
+     * check_same_stat_bucket will return true if the config file was not changed
+     * in case we had any issue (for example an error during stat) the returned value will be undefined
+     * @param {string} bucket_name
      * @param {nb.NativeFSStats} bucket_stat
-     * @returns Promise<{boolean>}
+     * @returns {Promise<boolean|undefined>}
      */
-    async check_same_stat(bucket_name, bucket_stat) {
+    async check_same_stat_bucket(bucket_name, bucket_stat) {
         try {
             const current_stat = await this.config_fs.stat_bucket_config_file(bucket_name);
             if (current_stat) {
                 return current_stat.ino === bucket_stat.ino && current_stat.mtimeNsBigint === bucket_stat.mtimeNsBigint;
             }
         } catch (err) {
-            dbg.warn('check_same_stat: current_stat got an error', err, 'ignoring...');
+            dbg.warn('check_same_stat_bucket: current_stat got an error', err, 'ignoring...');
+        }
+    }
+
+    /**
+     * check_same_stat_account will return true if the config file was not changed
+     * in case we had any issue (for example an error during stat) the returned value will be undefined
+     * @param {Symbol|string} access_key
+     * @param {nb.NativeFSStats} account_stat
+     * @returns {Promise<boolean|undefined>}
+     */
+    // TODO: account function should be handled in accountspace_fs
+    async check_same_stat_account(access_key, account_stat) {
+        try {
+            const current_stat = await this.config_fs.stat_account_config_file(access_key);
+            if (current_stat) {
+                return current_stat.ino === account_stat.ino && current_stat.mtimeNsBigint === account_stat.mtimeNsBigint;
+            }
+        } catch (err) {
+            dbg.warn('check_same_stat_account: current_stat got an error', err, 'ignoring...');
         }
     }
 

diff --git a/src/sdk/config_fs.js b/src/sdk/config_fs.js
index 9234df6f98..03130301fa 100644
--- a/src/sdk/config_fs.js
+++ b/src/sdk/config_fs.js
@@ -17,6 +17,7 @@ const { RpcError } = require('../rpc');
 const nc_mkm = require('../manage_nsfs/nc_master_key_manager').get_instance();
 const nsfs_schema_utils = require('../manage_nsfs/nsfs_schema_utils');
 const { version_compare } = require('../upgrade/upgrade_utils');
+const { anonymous_access_key } = require('./object_sdk');
 
 /** @typedef {import('fs').Dirent} Dirent */
 
@@ -448,6 +449,25 @@ class ConfigFS {
         return path.join(this.old_accounts_dir_path, this.json(account_name));
     }
 
+    /**
+     * stat_account_config_file will return the stat output of the account config file
+     * please note that stat might throw an error - you should wrap calls with try-catch and handle the error
+     * Note: for the anonymous account the access_key is a symbol (anonymous_access_key), otherwise it is a string (not a SensitiveString)
+     * @param {Symbol|string} access_key
+     * @returns {Promise}
+     */
+    stat_account_config_file(access_key) {
+        let path_for_account_or_user_config_file;
+        if (typeof access_key === 'symbol' && access_key === anonymous_access_key) { // anonymous account case
+            path_for_account_or_user_config_file = this.get_account_path_by_name(config.ANONYMOUS_ACCOUNT_NAME);
+        } else if (typeof access_key === 'string') { // rest of the cases
+            path_for_account_or_user_config_file = this.get_account_or_user_path_by_access_key(access_key);
+        } else { // we should not get here
+            throw new Error(`access_key must be of a valid type ${typeof access_key} ${access_key}`);
+        }
+        return nb_native().fs.stat(this.fs_context, path_for_account_or_user_config_file);
+    }
+
     /**
      * is_account_exists_by_name returns true if account config path exists in config dir
      * if account does not exist and it's a regular account (not an IAM user)

diff --git a/src/sdk/nb.d.ts b/src/sdk/nb.d.ts
index 29abd76fb4..33dad4f39d 100644
--- a/src/sdk/nb.d.ts
+++ b/src/sdk/nb.d.ts
@@ -824,7 +824,8 @@ interface BucketSpace {
     read_account_by_access_key({ access_key: string }): Promise;
     read_bucket_sdk_info({ name: string }): Promise;
 
-    check_same_stat(bucket_name: string, bucket_stat: nb.NativeFSStats); // only implemented in bucketspace_fs
+    check_same_stat_bucket(bucket_name: string, bucket_stat: nb.NativeFSStats); // only implemented in bucketspace_fs
+    check_same_stat_account(account_name: string|Symbol, account_stat: nb.NativeFSStats); // only implemented in bucketspace_fs
 
     list_buckets(params: object, object_sdk: ObjectSDK): Promise;
     read_bucket(params: object): Promise;

diff --git a/src/sdk/object_sdk.js b/src/sdk/object_sdk.js
index 864e9a1ae6..031ec858e3 100644
--- a/src/sdk/object_sdk.js
+++ b/src/sdk/object_sdk.js
@@ -45,10 +45,10 @@ const bucket_namespace_cache = new LRUCache({
     validate: (data, params) => params.sdk._validate_bucket_namespace(data, params),
 });
 
+// TODO: account_cache should be in account_sdk
 const account_cache = new LRUCache({
     name: 'AccountCache',
-    // TODO: Decide on a time that we want to invalidate
-    expiry_ms: Number(process.env.ACCOUNTS_CACHE_EXPIRY) || 10 * 60 * 1000,
+    expiry_ms: config.OBJECT_SDK_ACCOUNT_CACHE_EXPIRY_MS,
     /**
      * Set type for the generic template
      * @param {{
@@ -58,6 +58,7 @@ const account_cache = new LRUCache({
      */
     make_key: ({ access_key }) => access_key,
     load: async ({ bucketspace, access_key }) => bucketspace.read_account_by_access_key({ access_key }),
+    validate: (data, params) => _validate_account(data, params),
 });
 
 const dn_cache = new LRUCache({
@@ -79,6 +80,26 @@ const MULTIPART_NAMESPACES = [
 ];
 const required_obj_properties = ['obj_id', 'bucket', 'key', 'size', 'content_type', 'etag'];
 
+/** _validate_account is an additional validation layer (on top of expiry_ms),
+ * and in an NC deployment it checks the stat of the config file
+ * @param {object} data
+ * @param {object} params
+ * @returns {Promise<boolean>}
+ */
+// TODO: account function should be handled in account_sdk
+async function _validate_account(data, params) {
+    // stat check (only in bucketspace FS)
+    const bs = params.bucketspace;
+    const bs_allow_stat_account = Boolean(bs.check_same_stat_account);
+    if (bs_allow_stat_account && config.NC_ENABLE_ACCOUNT_CACHE_STAT_VALIDATION) {
+        const same_stat = await bs.check_same_stat_account(params.access_key, data.stat);
+        if (!same_stat) { // config file of the account was changed
+            return false;
+        }
+    }
+    return true;
+}
+
 class ObjectSDK {
 
     /**
@@ -298,9 +319,9 @@ class ObjectSDK {
         const time = Date.now();
         // stat check (only in bucketspace FS)
         const bs = this._get_bucketspace();
-        const ns_allow_stat_bucket = Boolean(bs.check_same_stat);
-        if (ns_allow_stat_bucket && config.NC_ENABLE_BUCKET_NS_CACHE_STAT_VALIDATION) {
-            const same_stat = await bs.check_same_stat(params.name, data.bucket.stat);
+        const bs_allow_stat_bucket = Boolean(bs.check_same_stat_bucket);
+        if (bs_allow_stat_bucket && config.NC_ENABLE_BUCKET_NS_CACHE_STAT_VALIDATION) {
+            const same_stat = await bs.check_same_stat_bucket(params.name, data.bucket.stat);
             if (!same_stat) { // config file of bucket was changed
                 return false;
             }

diff --git a/src/test/unit_tests/nc_coretest.js b/src/test/unit_tests/nc_coretest.js
index 0194419ffe..61e0202d2b 100644
--- a/src/test/unit_tests/nc_coretest.js
+++ b/src/test/unit_tests/nc_coretest.js
@@ -18,7 +18,7 @@
 const config_dir_name = 'nc_coretest_config_root_path';
 const master_key_location = `${TMP_PATH}/${config_dir_name}/master_keys.json`;
 const NC_CORETEST_CONFIG_DIR_PATH = `${TMP_PATH}/${config_dir_name}`;
 process.env.DEBUG_MODE = 'true';
-process.env.ACCOUNTS_CACHE_EXPIRY = '1';
+// process.env.ACCOUNTS_CACHE_EXPIRY = '1'; // in NC we check whether the config file was changed as part of the cache validation
 process.env.NC_CORETEST = 'true';
 require('../../util/dotenv').load();

diff --git a/src/test/unit_tests/test_nc_with_a_couple_of_forks.js b/src/test/unit_tests/test_nc_with_a_couple_of_forks.js
index caebdfab03..f6158155d8 100644
--- a/src/test/unit_tests/test_nc_with_a_couple_of_forks.js
+++ b/src/test/unit_tests/test_nc_with_a_couple_of_forks.js
@@ -1,19 +1,24 @@
 /* Copyright (C) 2024 NooBaa */
+/* eslint-disable max-statements */
 'use strict';
 
 const path = require('path');
 const _ = require('lodash');
+const fs = require('fs');
 const P = require('../../util/promise');
 const mocha = require('mocha');
 const assert = require('assert');
 const fs_utils = require('../../util/fs_utils');
-const { TMP_PATH, generate_nsfs_account, get_new_buckets_path_by_test_env, generate_s3_client, get_coretest_path } = require('../system_tests/test_utils');
+const { TMP_PATH, generate_nsfs_account, get_new_buckets_path_by_test_env, generate_s3_client,
+    get_coretest_path, exec_manage_cli } = require('../system_tests/test_utils');
+const { TYPES, ACTIONS } = require('../../manage_nsfs/manage_nsfs_constants');
+const ManageCLIResponse = require('../../manage_nsfs/manage_nsfs_cli_responses').ManageCLIResponse;
 
 const coretest_path = get_coretest_path();
 const coretest = require(coretest_path);
 const setup_options = { forks: 2, debug: 5 };
 coretest.setup(setup_options);
-const { rpc_client, EMAIL, get_current_setup_options, stop_nsfs_process, start_nsfs_process } = coretest;
+const { rpc_client, EMAIL, get_current_setup_options, stop_nsfs_process, start_nsfs_process, config_dir_name } = coretest;
 
 const CORETEST_ENDPOINT = coretest.get_http_address();
 
@@ -74,4 +79,72 @@ mocha.describe('operations with a couple of forks', async function() {
         // cleanup
         await s3_admin.deleteBucket({ Bucket: bucket_name });
     });
+
+    mocha.it('list buckets after regenerate access keys', async function() {
+        // create additional account
+        const account_name = 'James';
+        const account_options_create = { account_name, uid: 5, gid: 5, config_root: config_dir_name };
+        await fs_utils.create_fresh_path(new_bucket_path_param);
+        await fs.promises.chown(new_bucket_path_param, account_options_create.uid, account_options_create.gid);
+        await fs.promises.chmod(new_bucket_path_param, 0o700);
+        const access_details = await generate_nsfs_account(rpc_client, EMAIL, new_bucket_path_param, account_options_create);
+        // check the account status
+        const account_options_status = { config_root: config_dir_name, name: account_name};
+        const res_account_status = await exec_manage_cli(TYPES.ACCOUNT, ACTIONS.STATUS, account_options_status);
+        assert.equal(JSON.parse(res_account_status).response.code, ManageCLIResponse.AccountStatus.code);
+        // generate the s3 client
+        const s3_uid5_before_access_keys_update = generate_s3_client(access_details.access_key,
+            access_details.secret_key, CORETEST_ENDPOINT);
+        // check the connection for the new account (can be any of the forks)
+        const res_list_buckets = await s3_uid5_before_access_keys_update.listBuckets({});
+        assert.equal(res_list_buckets.$metadata.httpStatusCode, 200);
+        // create a bucket
+        const bucket_name2 = 'bucket2';
+        const res_bucket_create = await s3_uid5_before_access_keys_update.createBucket({ Bucket: bucket_name2 });
+        assert.equal(res_bucket_create.$metadata.httpStatusCode, 200);
+        // update the account
+        const account_options_update = { config_root: config_dir_name, name: account_name, regenerate: true};
+        const res_account_update = await exec_manage_cli(TYPES.ACCOUNT, ACTIONS.UPDATE, account_options_update);
+        const access_key_id_updated = JSON.parse(res_account_update).response.reply.access_keys[0].access_key;
+        const secret_key_updated = JSON.parse(res_account_update).response.reply.access_keys[0].secret_key;
+        const s3_uid5_after_access_keys_update = generate_s3_client(access_key_id_updated,
+            secret_key_updated, CORETEST_ENDPOINT);
+        // check the connection for the updated access keys account (can be any of the forks)
+        const res_list_buckets3 = await s3_uid5_after_access_keys_update.listBuckets({});
+        assert.equal(res_list_buckets3.$metadata.httpStatusCode, 200);
+
+        // a couple of requests with the previous access keys (all should fail)
+        // without the stat check the cache would expire only after OBJECT_SDK_ACCOUNT_CACHE_EXPIRY_MS
+        let failed_operations = 0;
+        let successful_operations = 0;
+        const number_of_requests = 5;
+        for (let i = 0; i < number_of_requests; i++) {
+            try {
+                await s3_uid5_before_access_keys_update.listBuckets({});
+                successful_operations += 1;
+            } catch (err) {
+                failed_operations += 1;
+            }
+        }
+        assert.equal(successful_operations, 0);
+        assert.equal(failed_operations, number_of_requests);
+
+        // a couple of requests with the updated access keys (all should succeed)
+        let failed_operations2 = 0;
+        let successful_operations2 = 0;
+        const number_of_requests2 = 5;
+        for (let i = 0; i < number_of_requests2; i++) {
+            try {
+                await s3_uid5_after_access_keys_update.listBuckets({});
+                successful_operations2 += 1;
+            } catch (err) {
+                failed_operations2 += 1;
+            }
+        }
+        assert.equal(successful_operations2, number_of_requests2);
+        assert.equal(failed_operations2, 0);
+
+        // cleanup
+        await s3_uid5_after_access_keys_update.deleteBucket({ Bucket: bucket_name2 });
+    });
 });
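For readers less familiar with the caching flow touched by this patch, the following is a minimal, self-contained sketch of the validation idea: a cached account entry is reused only while it is younger than the expiry window AND the backing config file still has the same inode and mtime. It uses plain Node fs APIs and hypothetical names and paths (ACCOUNTS_DIR, load_account, get_account) rather than NooBaa's LRUCache, config_fs.stat_account_config_file and nb_native stat, so it illustrates the technique and is not the code added by the patch.

'use strict';
// Minimal sketch (not part of the patch): stat-based cache validation with plain Node APIs.
// ACCOUNTS_DIR, load_account and get_account are hypothetical names used only for illustration.
const fs = require('fs');
const path = require('path');

const EXPIRY_MS = 10 * 60 * 1000;                      // parallels config.OBJECT_SDK_ACCOUNT_CACHE_EXPIRY_MS
const ACCOUNTS_DIR = '/etc/noobaa.conf.d/access_keys'; // hypothetical config-file directory

const cache = new Map(); // access_key -> { account, stat, time }

async function load_account(access_key) {
    const file = path.join(ACCOUNTS_DIR, `${access_key}.json`);
    const account = JSON.parse(await fs.promises.readFile(file, 'utf8'));
    // bigint stats expose ino/mtimeNs as BigInt (NooBaa's nb_native stat exposes mtimeNsBigint instead)
    const stat = await fs.promises.stat(file, { bigint: true });
    return { account, stat, time: Date.now() };
}

// An entry is reused only if it is younger than EXPIRY_MS AND the config file
// was not replaced or modified since it was loaded (same inode and mtime).
async function is_entry_valid(access_key, entry) {
    if (Date.now() - entry.time > EXPIRY_MS) return false;
    try {
        const current = await fs.promises.stat(path.join(ACCOUNTS_DIR, `${access_key}.json`), { bigint: true });
        return current.ino === entry.stat.ino && current.mtimeNs === entry.stat.mtimeNs;
    } catch (err) {
        return false; // stat failed (e.g. the file was deleted) - force a reload
    }
}

async function get_account(access_key) {
    const entry = cache.get(access_key);
    if (entry && await is_entry_valid(access_key, entry)) return entry.account;
    const fresh = await load_account(access_key);
    cache.set(access_key, fresh);
    return fresh.account;
}

module.exports = { get_account };

The practical benefit of the extra stat check, which the new test exercises, is that when the CLI regenerates an account's access keys in one process, endpoints in other forks notice the changed config file on the next request instead of serving the stale credentials until OBJECT_SDK_ACCOUNT_CACHE_EXPIRY_MS elapses.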