Skip to content

Commit

Permalink
Merge pull request #8585 from shirady/nsfs-nc-account-cache-with-stat
Browse files Browse the repository at this point in the history
NC | NSFS | Add stat to `account_cache`
  • Loading branch information
shirady authored Dec 23, 2024
2 parents 5bb3175 + b2ce224 commit 7bc814b
Show file tree
Hide file tree
Showing 7 changed files with 161 additions and 13 deletions.
5 changes: 5 additions & 0 deletions config.js
Original file line number Diff line number Diff line change
Expand Up @@ -637,9 +637,14 @@ config.INLINE_MAX_SIZE = 4096;

// Object SDK bucket cache expiration time
config.OBJECT_SDK_BUCKET_CACHE_EXPIRY_MS = 60000;
// Object SDK account cache expiration time (overridable via the ACCOUNTS_CACHE_EXPIRY env var)
config.OBJECT_SDK_ACCOUNT_CACHE_EXPIRY_MS = Number(process.env.ACCOUNTS_CACHE_EXPIRY) || 10 * 60 * 1000; // TODO: Decide on a time that we want to invalidate


// Object SDK bucket_namespace_cache - allow stat of the bucket config file as cache validation
config.NC_ENABLE_BUCKET_NS_CACHE_STAT_VALIDATION = true;
// Object SDK account_cache - allow stat of the account config file as cache validation
config.NC_ENABLE_ACCOUNT_CACHE_STAT_VALIDATION = true;

//////////////////////////////
// OPERATOR RELATED //
Expand Down
36 changes: 32 additions & 4 deletions src/sdk/bucketspace_fs.js
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,7 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
return fs_context;
}

// TODO: account function should be handled in accountspace_fs
async read_account_by_access_key({ access_key }) {
try {
if (!access_key) throw new Error('no access key');
Expand All @@ -85,6 +86,12 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
if (account.nsfs_account_config.distinguished_name) {
account.nsfs_account_config.distinguished_name = new SensitiveString(account.nsfs_account_config.distinguished_name);
}
try {
account.stat = await this.config_fs.stat_account_config_file(access_key);
} catch (err) {
dbg.warn(`BucketspaceFS.read_account_by_access_key could not stat_account_config_file` +
`of account id: ${account._id} account name: ${account.name.unwrap()}`);
}
return account;
} catch (err) {
dbg.error('BucketSpaceFS.read_account_by_access_key: failed with error', err);
Expand Down Expand Up @@ -186,18 +193,39 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
}

/**
* check_same_stat will return true the config file was not changed
* check_same_stat_bucket will return true the config file was not changed
* in case we had any issue (for example error during stat) the returned value will be undefined
* @param {string} bucket_name
* @param {nb.NativeFSStats} bucket_stat
* @returns Promise<{boolean>}
* @returns Promise<{boolean|undefined>}
*/
async check_same_stat(bucket_name, bucket_stat) {
async check_same_stat_bucket(bucket_name, bucket_stat) {
try {
const current_stat = await this.config_fs.stat_bucket_config_file(bucket_name);
if (current_stat) {
return current_stat.ino === bucket_stat.ino && current_stat.mtimeNsBigint === bucket_stat.mtimeNsBigint;
}
} catch (err) {
dbg.warn('check_same_stat: current_stat got an error', err, 'ignoring...');
dbg.warn('check_same_stat_bucket: current_stat got an error', err, 'ignoring...');
}
}

/**
* check_same_stat_account will return true the config file was not changed
* in case we had any issue (for example error during stat) the returned value will be undefined
* @param {Symbol|string} access_key
* @param {nb.NativeFSStats} account_stat
* @returns Promise<{boolean|undefined>}
*/
// TODO: account function should be handled in accountspace_fs
async check_same_stat_account(access_key, account_stat) {
try {
const current_stat = await this.config_fs.stat_account_config_file(access_key);
if (current_stat) {
return current_stat.ino === account_stat.ino && current_stat.mtimeNsBigint === account_stat.mtimeNsBigint;
}
} catch (err) {
dbg.warn('check_same_stat_account: current_stat got an error', err, 'ignoring...');
}
}

Expand Down
20 changes: 20 additions & 0 deletions src/sdk/config_fs.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ const { RpcError } = require('../rpc');
const nc_mkm = require('../manage_nsfs/nc_master_key_manager').get_instance();
const nsfs_schema_utils = require('../manage_nsfs/nsfs_schema_utils');
const { version_compare } = require('../upgrade/upgrade_utils');
const { anonymous_access_key } = require('./object_sdk');

/** @typedef {import('fs').Dirent} Dirent */

Expand Down Expand Up @@ -448,6 +449,25 @@ class ConfigFS {
return path.join(this.old_accounts_dir_path, this.json(account_name));
}

/**
* stat_account_config_file will return the stat output on account config file
* please notice that stat might throw an error - you should wrap it with try-catch and handle the error
* Note: access_key type of anonymous_access_key is a symbol, otherwise it is a string (not SensitiveString)
* @param {Symbol|string} access_key
* @returns {Promise<nb.NativeFSStats>}
*/
stat_account_config_file(access_key) {
let path_for_account_or_user_config_file;
if (typeof access_key === 'symbol' && access_key === anonymous_access_key) { // anonymous account case
path_for_account_or_user_config_file = this.get_account_path_by_name(config.ANONYMOUS_ACCOUNT_NAME);
} else if (typeof access_key === 'string') { // rest of the cases
path_for_account_or_user_config_file = this.get_account_or_user_path_by_access_key(access_key);
} else { // we should not get here
throw new Error(`access_key must be a from valid type ${typeof access_key} ${access_key}`);
}
return nb_native().fs.stat(this.fs_context, path_for_account_or_user_config_file);
}

/**
* is_account_exists_by_name returns true if account config path exists in config dir
* if account does not exist and it's a regular account (not an IAM user)
Expand Down
3 changes: 2 additions & 1 deletion src/sdk/nb.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -824,7 +824,8 @@ interface BucketSpace {

read_account_by_access_key({ access_key: string }): Promise<any>;
read_bucket_sdk_info({ name: string }): Promise<any>;
check_same_stat(bucket_name: string, bucket_stat: nb.NativeFSStats); // only implemented in bucketspace_fs
check_same_stat_bucket(bucket_name: string, bucket_stat: nb.NativeFSStats); // only implemented in bucketspace_fs
check_same_stat_account(account_name: string|Symbol, account_stat: nb.NativeFSStats); // only implemented in bucketspace_fs

list_buckets(params: object, object_sdk: ObjectSDK): Promise<any>;
read_bucket(params: object): Promise<any>;
Expand Down
31 changes: 26 additions & 5 deletions src/sdk/object_sdk.js
Original file line number Diff line number Diff line change
Expand Up @@ -45,10 +45,10 @@ const bucket_namespace_cache = new LRUCache({
validate: (data, params) => params.sdk._validate_bucket_namespace(data, params),
});

// TODO: account_cache should be in account_sdk
const account_cache = new LRUCache({
name: 'AccountCache',
// TODO: Decide on a time that we want to invalidate
expiry_ms: Number(process.env.ACCOUNTS_CACHE_EXPIRY) || 10 * 60 * 1000,
expiry_ms: config.OBJECT_SDK_ACCOUNT_CACHE_EXPIRY_MS,
/**
* Set type for the generic template
* @param {{
Expand All @@ -58,6 +58,7 @@ const account_cache = new LRUCache({
*/
make_key: ({ access_key }) => access_key,
load: async ({ bucketspace, access_key }) => bucketspace.read_account_by_access_key({ access_key }),
validate: (data, params) => _validate_account(data, params),
});

const dn_cache = new LRUCache({
Expand All @@ -79,6 +80,26 @@ const MULTIPART_NAMESPACES = [
];
const required_obj_properties = ['obj_id', 'bucket', 'key', 'size', 'content_type', 'etag'];

/** _validate_account is an additional validation layer (on top of expiry_ms)
 * and in NC deployment it checks the stat of the account config file
 * to detect config changes (e.g. regenerated access keys) before the cache expires
 * @param {object} data
 * @param {object} params
 * @returns {Promise<boolean>}
 */
// TODO: account function should be handled in account_sdk
async function _validate_account(data, params) {
    // stat check (only in bucketspace FS - other bucketspaces do not implement it)
    const bs = params.bucketspace;
    const bs_allow_stat_account = Boolean(bs.check_same_stat_account);
    if (bs_allow_stat_account && config.NC_ENABLE_ACCOUNT_CACHE_STAT_VALIDATION) {
        const same_stat = await bs.check_same_stat_account(params.access_key, data.stat);
        if (!same_stat) { // config file of the account was changed (or stat failed)
            return false;
        }
    }
    return true;
}

class ObjectSDK {

/**
Expand Down Expand Up @@ -298,9 +319,9 @@ class ObjectSDK {
const time = Date.now();
// stat check (only in bucketspace FS)
const bs = this._get_bucketspace();
const ns_allow_stat_bucket = Boolean(bs.check_same_stat);
if (ns_allow_stat_bucket && config.NC_ENABLE_BUCKET_NS_CACHE_STAT_VALIDATION) {
const same_stat = await bs.check_same_stat(params.name, data.bucket.stat);
const bs_allow_stat_bucket = Boolean(bs.check_same_stat_bucket);
if (bs_allow_stat_bucket && config.NC_ENABLE_BUCKET_NS_CACHE_STAT_VALIDATION) {
const same_stat = await bs.check_same_stat_bucket(params.name, data.bucket.stat);
if (!same_stat) { // config file of bucket was changed
return false;
}
Expand Down
2 changes: 1 addition & 1 deletion src/test/unit_tests/nc_coretest.js
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ const config_dir_name = 'nc_coretest_config_root_path';
const master_key_location = `${TMP_PATH}/${config_dir_name}/master_keys.json`;
const NC_CORETEST_CONFIG_DIR_PATH = `${TMP_PATH}/${config_dir_name}`;
process.env.DEBUG_MODE = 'true';
process.env.ACCOUNTS_CACHE_EXPIRY = '1';
// process.env.ACCOUNTS_CACHE_EXPIRY = '1'; In NC we check if the config file was changed as validation
process.env.NC_CORETEST = 'true';

require('../../util/dotenv').load();
Expand Down
77 changes: 75 additions & 2 deletions src/test/unit_tests/test_nc_with_a_couple_of_forks.js
Original file line number Diff line number Diff line change
@@ -1,19 +1,24 @@
/* Copyright (C) 2024 NooBaa */
/* eslint-disable max-statements */
'use strict';

const path = require('path');
const _ = require('lodash');
const fs = require('fs');
const P = require('../../util/promise');
const mocha = require('mocha');
const assert = require('assert');
const fs_utils = require('../../util/fs_utils');
const { TMP_PATH, generate_nsfs_account, get_new_buckets_path_by_test_env, generate_s3_client, get_coretest_path } = require('../system_tests/test_utils');
const { TMP_PATH, generate_nsfs_account, get_new_buckets_path_by_test_env, generate_s3_client,
get_coretest_path, exec_manage_cli } = require('../system_tests/test_utils');
const { TYPES, ACTIONS } = require('../../manage_nsfs/manage_nsfs_constants');
const ManageCLIResponse = require('../../manage_nsfs/manage_nsfs_cli_responses').ManageCLIResponse;

const coretest_path = get_coretest_path();
const coretest = require(coretest_path);
const setup_options = { forks: 2, debug: 5 };
coretest.setup(setup_options);
const { rpc_client, EMAIL, get_current_setup_options, stop_nsfs_process, start_nsfs_process } = coretest;
const { rpc_client, EMAIL, get_current_setup_options, stop_nsfs_process, start_nsfs_process, config_dir_name } = coretest;

const CORETEST_ENDPOINT = coretest.get_http_address();

Expand Down Expand Up @@ -74,4 +79,72 @@ mocha.describe('operations with a couple of forks', async function() {
// cleanup
await s3_admin.deleteBucket({ Bucket: bucket_name });
});

// verifies that after regenerating an account's access keys, all forks reject the
// old keys immediately (account_cache stat validation) and accept the new keys
mocha.it('list buckets after regenerate access keys', async function() {
// create additional account
const account_name = 'James';
const account_options_create = { account_name, uid: 5, gid: 5, config_root: config_dir_name };
await fs_utils.create_fresh_path(new_bucket_path_param);
await fs.promises.chown(new_bucket_path_param, account_options_create.uid, account_options_create.gid);
await fs.promises.chmod(new_bucket_path_param, 0o700);
const access_details = await generate_nsfs_account(rpc_client, EMAIL, new_bucket_path_param, account_options_create);
// check the account status
const account_options_status = { config_root: config_dir_name, name: account_name};
const res_account_status = await exec_manage_cli(TYPES.ACCOUNT, ACTIONS.STATUS, account_options_status);
assert.equal(JSON.parse(res_account_status).response.code, ManageCLIResponse.AccountStatus.code);
// generate the s3 client
const s3_uid5_before_access_keys_update = generate_s3_client(access_details.access_key,
access_details.secret_key, CORETEST_ENDPOINT);
// check the connection for the new account (can be any of the forks)
const res_list_buckets = await s3_uid5_before_access_keys_update.listBuckets({});
assert.equal(res_list_buckets.$metadata.httpStatusCode, 200);
// create a bucket
const bucket_name2 = 'bucket2';
const res_bucket_create = await s3_uid5_before_access_keys_update.createBucket({ Bucket: bucket_name2 });
assert.equal(res_bucket_create.$metadata.httpStatusCode, 200);
// update the account (regenerate replaces the access keys in the account config file)
const account_options_update = { config_root: config_dir_name, name: account_name, regenerate: true};
const res_account_update = await exec_manage_cli(TYPES.ACCOUNT, ACTIONS.UPDATE, account_options_update);
const access_key_id_updated = JSON.parse(res_account_update).response.reply.access_keys[0].access_key;
const secret_key_updated = JSON.parse(res_account_update).response.reply.access_keys[0].secret_key;
const s3_uid5_after_access_keys_update = generate_s3_client(access_key_id_updated,
secret_key_updated, CORETEST_ENDPOINT);
// check the connection for the updated access keys account (can be any of the forks)
const res_list_buckets3 = await s3_uid5_after_access_keys_update.listBuckets({});
assert.equal(res_list_buckets3.$metadata.httpStatusCode, 200);

// a couple of requests with the previous access keys (all should fail)
// without checking the stat the expiry is OBJECT_SDK_ACCOUNT_CACHE_EXPIRY_MS
let failed_operations = 0;
let successful_operations = 0;
const number_of_requests = 5;
// several requests so the round-robin hits every fork, not just one
for (let i = 0; i < number_of_requests; i++) {
try {
await s3_uid5_before_access_keys_update.listBuckets({});
successful_operations += 1;
} catch (err) {
failed_operations += 1;
}
}
assert.equal(successful_operations, 0);
assert.equal(failed_operations, number_of_requests);

// a couple of requests with the updated access keys (all should succeed)
let failed_operations2 = 0;
let successful_operations2 = 0;
const number_of_requests2 = 5;
for (let i = 0; i < number_of_requests2; i++) {
try {
await s3_uid5_after_access_keys_update.listBuckets({});
successful_operations2 += 1;
} catch (err) {
failed_operations2 += 1;
}
}
assert.equal(successful_operations2, number_of_requests2);
assert.equal(failed_operations2, 0);

// cleanup
await s3_uid5_after_access_keys_update.deleteBucket({ Bucket: bucket_name2 });
});
});

0 comments on commit 7bc814b

Please sign in to comment.