223 changes: 134 additions & 89 deletions src/sdk/bucketspace_fs.js
@@ -241,7 +241,7 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
* permissions
* a. First iteration - map_with_concurrency with concurrency rate of 10 entries at the same time.
* a.1. if entry is dir file/entry is non json - we will return undefined
* a.2. if bucket is unaccessible by bucket policy - we will return undefined
* a.2. if bucket is not owned by the account (or their root account for IAM users) - we will return undefined
* a.3. if underlying storage of the bucket is unaccessible by the account's uid/gid - we will return undefined
* a.4. else - return the bucket config info.
* b. Second iteration - filter empty entries - filter will remove undefined values produced by the map_with_concurrency().
@@ -271,9 +271,9 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
if (err.rpc_code !== 'NO_SUCH_BUCKET') throw err;
}
if (!bucket) return;
const bucket_policy_accessible = await this.has_bucket_action_permission(bucket, account, 's3:ListBucket');
dbg.log2(`list_buckets: bucket_name ${bucket_name} bucket_policy_accessible`, bucket_policy_accessible);
if (!bucket_policy_accessible) return;

if (!this.has_bucket_ownership_permission(bucket, account)) return;

const fs_accessible = await this.validate_fs_bucket_access(bucket, object_sdk);
if (!fs_accessible) return;
return bucket;
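
The comment at the top of this file's first hunk describes the ListBuckets filtering flow: a first pass maps over the bucket config entries with a concurrency of 10 and resolves to undefined for any entry that fails a check (non-JSON entry, bucket not owned by the account or its root account, or underlying storage not accessible), and a second pass filters the undefined values out. The sketch below only illustrates that shape; `map_with_concurrency` and `check_bucket_entry` are assumed/hypothetical names, not the exact helpers used in this file.

```js
// Minimal sketch of the two-pass flow described above.
// Assumptions: map_with_concurrency(limit, items, fn) is a bounded-concurrency
// mapper similar to the one used by list_buckets, and check_bucket_entry()
// implements the per-entry checks a.1-a.4, returning bucket info or undefined.
async function list_accessible_buckets(entries, check_bucket_entry) {
    const CONCURRENCY = 10;
    // First iteration: evaluate up to 10 entries concurrently; entries that fail
    // any of the checks a.1-a.4 resolve to undefined.
    const maybe_buckets = await map_with_concurrency(CONCURRENCY, entries,
        async entry => check_bucket_entry(entry));
    // Second iteration: filter out the undefined values left by the first pass.
    return maybe_buckets.filter(bucket => bucket !== undefined);
}
```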
@@ -921,83 +921,6 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
///// UTILS /////
/////////////////

// TODO: move the following 3 functions - has_bucket_action_permission(), validate_fs_bucket_access(), _has_access_to_nsfs_dir()
// so they can be re-used
async has_bucket_action_permission(bucket, account, action, bucket_path = "") {
dbg.log1('has_bucket_action_permission:', bucket.name.unwrap(), account.name.unwrap(), account._id, bucket.bucket_owner.unwrap());

const is_system_owner = Boolean(bucket.system_owner) && bucket.system_owner.unwrap() === account.name.unwrap();

// If the system owner account wants to access the bucket, allow it
if (is_system_owner) return true;
const is_owner = (bucket.owner_account && bucket.owner_account.id === account._id);
const bucket_policy = bucket.s3_policy;

if (!bucket_policy) {
// in case we do not have bucket policy
// we allow IAM account to access a bucket that that is owned by their root account
const is_iam_and_same_root_account_owner = account.owner !== undefined &&
account.owner === bucket.owner_account.id;
return is_owner || is_iam_and_same_root_account_owner;
}
if (!action) {
throw new Error('has_bucket_action_permission: action is required');
}

let permission_by_name;
const permission_by_id = await bucket_policy_utils.has_bucket_policy_permission(
bucket_policy,
account._id,
action,
`arn:aws:s3:::${bucket.name.unwrap()}${bucket_path}`,
undefined,
{ disallow_public_access: bucket.public_access_block?.restrict_public_buckets }
);
if (permission_by_id === "DENY") return false;
// we (currently) allow account identified to be both id and name,
// so if by-id failed, try also name
if (account.owner === undefined) {
permission_by_name = await bucket_policy_utils.has_bucket_policy_permission(
bucket_policy,
account.name.unwrap(),
action,
`arn:aws:s3:::${bucket.name.unwrap()}${bucket_path}`,
undefined,
{ disallow_public_access: bucket.public_access_block?.restrict_public_buckets }
);
}
if (permission_by_name === 'DENY') return false;
return is_owner || (permission_by_id === 'ALLOW' || permission_by_name === 'ALLOW');
}

async validate_fs_bucket_access(bucket, object_sdk) {
dbg.log1('bucketspace_fs.validate_fs_bucket_access:', bucket.name.unwrap());
const ns = bucket.namespace;
const is_nsfs_bucket = object_sdk.is_nsfs_bucket(ns);
const accessible = is_nsfs_bucket ? await this._has_access_to_nsfs_dir(ns, object_sdk) : false;
dbg.log1('bucketspace_fs.validate_fs_bucket_access:', bucket.name.unwrap(), is_nsfs_bucket, accessible);
return accessible;
}

async _has_access_to_nsfs_dir(ns, object_sdk) {
const account = object_sdk.requesting_account;
dbg.log1('_has_access_to_nsfs_dir: nsr: ', ns, 'account.nsfs_account_config: ', account && account.nsfs_account_config);
// nsfs bucket
if (!account || !account.nsfs_account_config || (account.nsfs_account_config.uid === undefined) ||
(account.nsfs_account_config.gid === undefined)) return false;
try {
dbg.log1('_has_access_to_nsfs_dir: checking access:', ns.write_resource, account.nsfs_account_config.uid, account.nsfs_account_config.gid);
const path_to_check = path.join(ns.write_resource.resource.fs_root_path, ns.write_resource.path || '');
const fs_context = this.prepare_fs_context(object_sdk, ns.write_resource.resource.fs_backend);
await nb_native().fs.checkAccess(fs_context, path_to_check);
return true;
} catch (err) {
dbg.log0('_has_access_to_nsfs_dir: failed', err);
if (err.code === 'ENOENT' || err.code === 'EACCES' || (err.code === 'EPERM' && err.message === 'Operation not permitted')) return false;
throw err;
}
}

is_nsfs_containerized_user_anonymous(token) {
return !token && !process.env.NC_NSFS_NO_DB_ENV;
}
@@ -1025,10 +948,10 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
/**
* _is_bucket_empty returns true if the given bucket is empty
*
* @param {*} ns
* @param {*} params
* @param {string} name
* @param {nb.ObjectSDK} object_sdk
* @param {*} ns
* @param {*} params
* @param {string} name
* @param {nb.ObjectSDK} object_sdk
*
* @returns {Promise<boolean>}
*/
@@ -1055,8 +978,8 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
* _generate_reserved_tag_event_args returns the list of reserved
* bucket tags which have been modified and also returns a variable
* indicating if there has been any modifications at all.
* @param {Record<string, string>} prev_tags_objectified
* @param {Array<{key: string, value: string}>} new_tags
* @param {Record<string, string>} prev_tags_objectified
* @param {Array<{key: string, value: string}>} new_tags
*/
static _generate_reserved_tag_event_args(prev_tags_objectified, new_tags) {
let reserved_tag_modified = false;
@@ -1080,7 +1003,7 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
}

/**
* @param {Array<{key: string, value: string}>} tagging
* @param {Array<{key: string, value: string}>} tagging
* @returns {Record<string, string>}
*/
static _objectify_tagging_arr(tagging) {
Expand All @@ -1089,7 +1012,7 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {

/**
* _create_uls creates underylying storage at the given storage_path
* @param {*} fs_context - root fs_context
* @param {*} fs_context - root fs_context
* @param {*} acc_fs_context - fs_context associated with the performing account
* @param {*} name - bucket name
* @param {*} storage_path - bucket's storage path
@@ -1106,6 +1029,128 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
throw translate_error_codes(error, entity_enum.BUCKET);
}
}

//////////////////
// SHARED UTILS //
//////////////////

// TODO: move the below functions to a shared utils file so they can be re-used

/**
* is_bucket_owner checks if the account is the direct owner of the bucket
* @param {nb.Bucket} bucket
* @param {nb.Account} account
* @returns {boolean}
*/
is_bucket_owner(bucket, account) {
if (!bucket?.owner_account?.id || !account?._id) return false;
return bucket.owner_account.id === account._id;
}

/**
* is_iam_and_same_root_account_owner checks if the account is an IAM user and the bucket is owned by their root account
* @param {nb.Account} account
* @param {nb.Bucket} bucket
* @returns {boolean}
*/
is_iam_and_same_root_account_owner(account, bucket) {
if (!account?.owner || !bucket?.owner_account?.id) return false;
return account.owner === bucket.owner_account.id;
}

/**
* has_bucket_ownership_permission returns true if the account can list the bucket in ListBuckets operation
*
* AWS-compliant behavior:
* - Root accounts can list buckets they own
* - IAM users can list buckets owned by their root account
*
* @param {nb.Bucket} bucket
* @param {nb.Account} account
* @returns {boolean}
*/
has_bucket_ownership_permission(bucket, account) {
// check direct ownership
if (this.is_bucket_owner(bucket, account)) return true;

// special case: an IAM user can list buckets owned by their root account
if (this.is_iam_and_same_root_account_owner(account, bucket)) return true;

return false;
}

async has_bucket_action_permission(bucket, account, action, bucket_path = "") {
dbg.log1('has_bucket_action_permission:', bucket.name.unwrap(), account.name.unwrap(), account._id, bucket.bucket_owner.unwrap());

// system_owner is deprecated since version 5.18, so we don't need to check it

// check ownership: direct owner or IAM user inheritance
const is_owner = this.is_bucket_owner(bucket, account);

const bucket_policy = bucket.s3_policy;

if (!bucket_policy) {
// in case we do not have bucket policy
// we allow an IAM account to access a bucket that is owned by their root account
return is_owner || this.is_iam_and_same_root_account_owner(account, bucket);
}
if (!action) {
throw new Error('has_bucket_action_permission: action is required');
}

let permission_by_name;
const permission_by_id = await bucket_policy_utils.has_bucket_policy_permission(
bucket_policy,
account._id,
action,
`arn:aws:s3:::${bucket.name.unwrap()}${bucket_path}`,
undefined,
{ disallow_public_access: bucket.public_access_block?.restrict_public_buckets }
);
if (permission_by_id === "DENY") return false;
// we (currently) allow the account identity to be either an id or a name,
// so if the check by id did not match, also try by name
if (account.owner === undefined) {
permission_by_name = await bucket_policy_utils.has_bucket_policy_permission(
bucket_policy,
account.name.unwrap(),
action,
`arn:aws:s3:::${bucket.name.unwrap()}${bucket_path}`,
undefined,
{ disallow_public_access: bucket.public_access_block?.restrict_public_buckets }
);
}
if (permission_by_name === 'DENY') return false;
return is_owner || (permission_by_id === 'ALLOW' || permission_by_name === 'ALLOW');
}

async validate_fs_bucket_access(bucket, object_sdk) {
dbg.log1('bucketspace_fs.validate_fs_bucket_access:', bucket.name.unwrap());
const ns = bucket.namespace;
const is_nsfs_bucket = object_sdk.is_nsfs_bucket(ns);
const accessible = is_nsfs_bucket ? await this._has_access_to_nsfs_dir(ns, object_sdk) : false;
dbg.log1('bucketspace_fs.validate_fs_bucket_access:', bucket.name.unwrap(), is_nsfs_bucket, accessible);
return accessible;
}

async _has_access_to_nsfs_dir(ns, object_sdk) {
const account = object_sdk.requesting_account;
dbg.log1('_has_access_to_nsfs_dir: nsr: ', ns, 'account.nsfs_account_config: ', account && account.nsfs_account_config);
// nsfs bucket
if (!account || !account.nsfs_account_config || (account.nsfs_account_config.uid === undefined) ||
(account.nsfs_account_config.gid === undefined)) return false;
try {
dbg.log1('_has_access_to_nsfs_dir: checking access:', ns.write_resource, account.nsfs_account_config.uid, account.nsfs_account_config.gid);
const path_to_check = path.join(ns.write_resource.resource.fs_root_path, ns.write_resource.path || '');
const fs_context = this.prepare_fs_context(object_sdk, ns.write_resource.resource.fs_backend);
await nb_native().fs.checkAccess(fs_context, path_to_check);
return true;
} catch (err) {
dbg.log0('_has_access_to_nsfs_dir: failed', err);
if (err.code === 'ENOENT' || err.code === 'EACCES' || (err.code === 'EPERM' && err.message === 'Operation not permitted')) return false;
throw err;
}
}
}

module.exports = BucketSpaceFS;
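
The new shared utils split ownership checks from bucket-policy evaluation: has_bucket_ownership_permission answers the ListBuckets question using ownership only (the direct owner, or an IAM user whose root account owns the bucket), while has_bucket_action_permission falls through to the bucket policy, where an explicit DENY wins and owners or an explicit ALLOW pass. Below is a hedged usage sketch, not the exact call sites in this PR: bs_fs stands for a BucketSpaceFS instance, and the bucket/account arguments follow the nb.Bucket / nb.Account shapes.

```js
// Hedged usage sketch of the new helpers (the names bs_fs, can_list_bucket and
// can_put_bucket_tagging are illustrative, not part of this change).
function can_list_bucket(bs_fs, bucket, account) {
    // ListBuckets: ownership only - the direct owner, or an IAM user whose
    // root account owns the bucket. The bucket policy is not consulted here.
    return bs_fs.has_bucket_ownership_permission(bucket, account);
}

async function can_put_bucket_tagging(bs_fs, bucket, account) {
    // Other bucket operations: evaluate the bucket policy; an explicit DENY
    // returns false, ownership or an explicit ALLOW returns true.
    return bs_fs.has_bucket_action_permission(bucket, account, 's3:PutBucketTagging');
}
```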
2 changes: 2 additions & 0 deletions src/sdk/nb.d.ts
@@ -63,6 +63,7 @@ interface System extends Base {

interface Account extends Base {
_id: ID;
owner?: ID;
name: string;
system: System;
email: SensitiveString;
@@ -179,6 +180,7 @@ interface MirrorStatus {

interface Bucket extends Base {
_id: ID;
owner_account?: ID;
deleted?: Date;
name: SensitiveString;
system: System;
61 changes: 37 additions & 24 deletions src/test/unit_tests/nsfs/test_bucketspace_fs.js
@@ -519,28 +519,6 @@ mocha.describe('bucketspace_fs', function() {
const res = await bucketspace_fs.list_buckets({}, dummy_object_sdk_for_iam_account);
assert.equal(res.buckets.length, 0);
});
mocha.it('list buckets - different account with bucket policy (principal by name)', async function() {
// another user created a bucket
// with bucket policy account_user3 can list it
const policy = generate_s3_policy(account_user4.name, test_bucket, ['s3:*']).policy;
const param = { name: test_bucket, policy: policy };
await bucketspace_fs.put_bucket_policy(param);
const dummy_object_sdk_for_iam_account = make_dummy_object_sdk_for_account(dummy_object_sdk, account_user4);
const res = await bucketspace_fs.list_buckets({}, dummy_object_sdk_for_iam_account);
assert.equal(res.buckets.length, 1);
assert.equal(res.buckets[0].name.unwrap(), test_bucket);
});
mocha.it('list buckets - different account with bucket policy (principal by id)', async function() {
// another user created a bucket
// with bucket policy account_user3 can list it
const policy = generate_s3_policy(account_user4._id, test_bucket, ['s3:*']).policy;
const param = { name: test_bucket, policy: policy };
await bucketspace_fs.put_bucket_policy(param);
const dummy_object_sdk_for_iam_account = make_dummy_object_sdk_for_account(dummy_object_sdk, account_user4);
const res = await bucketspace_fs.list_buckets({}, dummy_object_sdk_for_iam_account);
assert.equal(res.buckets.length, 1);
assert.equal(res.buckets[0].name.unwrap(), test_bucket);
});
mocha.afterEach(async function() {
await fs_utils.folder_delete(`${new_buckets_path}/${test_bucket}`);
const file_path = get_config_file_path(CONFIG_SUBDIRS.BUCKETS, test_bucket);
@@ -928,19 +906,54 @@ mocha.describe('bucketspace_fs', function() {
});

mocha.describe('bucket tagging operations', function() {
const test_bucket_tagging = 'test-bucket-tagging';
mocha.before(async function() {
await create_bucket(test_bucket_tagging);
});

mocha.it('put_bucket_tagging', async function() {
const param = { name: test_bucket, tagging: [{ key: 'k1', value: 'v1' }] };
const param = { name: test_bucket_tagging, tagging: [{ key: 'k1', value: 'v1' }] };
await bucketspace_fs.put_bucket_tagging(param, dummy_object_sdk);
const tag = await bucketspace_fs.get_bucket_tagging(param);
assert.deepEqual(tag, { tagging: param.tagging });
});

mocha.it('delete_bucket_tagging', async function() {
const param = { name: test_bucket };
const param = { name: test_bucket_tagging };
await bucketspace_fs.delete_bucket_tagging(param);
const tag = await bucketspace_fs.get_bucket_tagging(param);
assert.deepEqual(tag, { tagging: [] });
});

mocha.it('put_bucket_tagging - different account with bucket policy (principal by name)', async function() {
const policy = generate_s3_policy(account_user4.name, test_bucket_tagging, ['s3:PutBucketTagging']).policy;
const param = { name: test_bucket_tagging, policy: policy };
await bucketspace_fs.put_bucket_policy(param);
const dummy_object_sdk_for_account = make_dummy_object_sdk_for_account(dummy_object_sdk, account_user4);
const tagging_param = { name: test_bucket_tagging, tagging: [{ key: 'test-key', value: 'test-value' }] };
await bucketspace_fs.put_bucket_tagging(tagging_param, dummy_object_sdk_for_account);
const tagging = await bucketspace_fs.get_bucket_tagging({ name: test_bucket_tagging });
assert.equal(tagging.tagging.length, 1);
assert.equal(tagging.tagging[0].key, 'test-key');
});

mocha.it('put_bucket_tagging - different account with bucket policy (principal by id)', async function() {
const policy = generate_s3_policy(account_user4._id, test_bucket_tagging, ['s3:PutBucketTagging']).policy;
const param = { name: test_bucket_tagging, policy: policy };
await bucketspace_fs.put_bucket_policy(param);
const dummy_object_sdk_for_account = make_dummy_object_sdk_for_account(dummy_object_sdk, account_user4);
const tagging_param = { name: test_bucket_tagging, tagging: [{ key: 'test-key', value: 'test-value' }] };
await bucketspace_fs.put_bucket_tagging(tagging_param, dummy_object_sdk_for_account);
const tagging = await bucketspace_fs.get_bucket_tagging({ name: test_bucket_tagging });
assert.equal(tagging.tagging.length, 1);
assert.equal(tagging.tagging[0].key, 'test-key');
});

mocha.after(async function() {
await fs_utils.folder_delete(`${new_buckets_path}/${test_bucket_tagging}`);
const file_path = get_config_file_path(CONFIG_SUBDIRS.BUCKETS, test_bucket_tagging);
await fs_utils.file_delete(file_path);
});
});

mocha.describe('bucket lifecycle operations', function() {
Expand Down