Skip to content

Commit

Permalink
Merge pull request #8365 from liranmauda/liran-backport-into-5.17
Browse files Browse the repository at this point in the history
[Backport into 5.17] Backport fixes
  • Loading branch information
liranmauda authored Sep 16, 2024
2 parents be279bf + 5162181 commit 2254a6b
Show file tree
Hide file tree
Showing 11 changed files with 262 additions and 63 deletions.
3 changes: 2 additions & 1 deletion config.js
Original file line number Diff line number Diff line change
Expand Up @@ -764,7 +764,8 @@ config.NSFS_REMOVE_PARTS_ON_COMPLETE = true;
config.NSFS_BUF_POOL_WARNING_TIMEOUT = 2 * 60 * 1000;
config.NSFS_SEM_WARNING_TIMEOUT = 10 * 60 * 1000;
// number of rename retries in case of deleted destination directory
config.NSFS_RENAME_RETRIES = 3;
config.NSFS_RENAME_RETRIES = 10;
config.NSFS_MKDIR_PATH_RETRIES = 3;

config.NSFS_VERSIONING_ENABLED = true;
config.NSFS_UPDATE_ISSUES_REPORT_ENABLED = true;
Expand Down
2 changes: 1 addition & 1 deletion src/endpoint/sts/sts_rest.js
Original file line number Diff line number Diff line change
Expand Up @@ -215,7 +215,7 @@ function _is_statements_fit(statements, method, cur_account_email) {
// who can do that action
for (const principal of statement.principal) {
dbg.log0('assume_role_policy: principal fit?', principal.unwrap().toString(), cur_account_email);
if (principal.unwrap() === cur_account_email) {
if ((principal.unwrap() === cur_account_email) || (principal.unwrap() === '*')) {
principal_fit = true;
}
}
Expand Down
16 changes: 11 additions & 5 deletions src/sdk/namespace_blob.js
Original file line number Diff line number Diff line change
Expand Up @@ -170,11 +170,17 @@ class NamespaceBlob {
} catch (err) {
this._translate_error_code(err);
dbg.warn('NamespaceBlob.read_object_md:', inspect(err));
object_sdk.rpc_client.pool.update_issues_report({
namespace_resource_id: this.namespace_resource_id,
error_code: err.code || (err.details && err.details.errorCode) || 'InternalError',
time: Date.now(),
});

// It's totally expected to issue `HeadObject` against an object that doesn't exist
// this shouldn't be counted as an issue for the namespace store
if (err.rpc_code !== 'NO_SUCH_OBJECT') {
object_sdk.rpc_client.pool.update_issues_report({
namespace_resource_id: this.namespace_resource_id,
error_code: err.code || (err.details && err.details.errorCode) || 'InternalError',
time: Date.now(),
});
}

throw err;
}
}
Expand Down
54 changes: 32 additions & 22 deletions src/sdk/namespace_fs.js
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,6 @@ const multi_buffer_pool = new buffer_utils.MultiSizeBuffersPool({

const XATTR_USER_PREFIX = 'user.';
const XATTR_NOOBAA_INTERNAL_PREFIX = XATTR_USER_PREFIX + 'noobaa.';
const XATTR_NOOBAA_CUSTOM_PREFIX = XATTR_NOOBAA_INTERNAL_PREFIX + 'tag.';
// TODO: In order to verify validity add content_md5_mtime as well
const XATTR_MD5_KEY = XATTR_USER_PREFIX + 'content_md5';
const XATTR_CONTENT_TYPE = XATTR_NOOBAA_INTERNAL_PREFIX + 'content_type';
Expand All @@ -63,6 +62,7 @@ const XATTR_VERSION_ID = XATTR_NOOBAA_INTERNAL_PREFIX + 'version_id';
const XATTR_PREV_VERSION_ID = XATTR_NOOBAA_INTERNAL_PREFIX + 'prev_version_id';
const XATTR_DELETE_MARKER = XATTR_NOOBAA_INTERNAL_PREFIX + 'delete_marker';
const XATTR_DIR_CONTENT = XATTR_NOOBAA_INTERNAL_PREFIX + 'dir_content';
const XATTR_TAG = XATTR_NOOBAA_INTERNAL_PREFIX + 'tag.';
const HIDDEN_VERSIONS_PATH = '.versions';
const NULL_VERSION_ID = 'null';
const NULL_VERSION_SUFFIX = '_' + NULL_VERSION_ID;
Expand Down Expand Up @@ -399,17 +399,21 @@ const versions_dir_cache = new LRUCache({
},
validate: async ({ stat, ver_dir_stat }, { dir_path, fs_context }) => {
const new_stat = await nb_native().fs.stat(fs_context, dir_path);
if (ver_dir_stat) {
const versions_dir_path = path.normalize(path.join(dir_path, '/', HIDDEN_VERSIONS_PATH));
const new_versions_stat = await nb_native().fs.stat(fs_context, versions_dir_path);
return (new_stat.ino === stat.ino &&
new_stat.mtimeNsBigint === stat.mtimeNsBigint &&
new_versions_stat.ino === ver_dir_stat.ino &&
new_versions_stat.mtimeNsBigint === ver_dir_stat.mtimeNsBigint);
} else {
return (new_stat.ino === stat.ino &&
new_stat.mtimeNsBigint === stat.mtimeNsBigint);
const versions_dir_path = path.normalize(path.join(dir_path, '/', HIDDEN_VERSIONS_PATH));
let new_versions_stat;
try {
new_versions_stat = await nb_native().fs.stat(fs_context, versions_dir_path);
} catch (err) {
if (err.code === 'ENOENT') {
dbg.log0('NamespaceFS: Version dir not found, ', versions_dir_path);
} else {
throw err;
}
}
return (new_stat.ino === stat.ino &&
new_stat.mtimeNsBigint === stat.mtimeNsBigint &&
new_versions_stat?.ino === ver_dir_stat?.ino &&
new_versions_stat?.mtimeNsBigint === ver_dir_stat?.mtimeNsBigint);
},
item_usage: ({ usage }, dir_path) => usage,
max_usage: config.NSFS_DIR_CACHE_MAX_TOTAL_SIZE,
Expand Down Expand Up @@ -1287,7 +1291,7 @@ class NamespaceFS {
const same_inode = params.copy_source && copy_res === copy_status_enum.SAME_INODE;
const is_dir_content = this._is_directory_content(file_path, params.key);

let stat = await target_file.stat(fs_context);
const stat = await target_file.stat(fs_context);
this._verify_encryption(params.encryption, this._get_encryption_info(stat));

// handle xattr
Expand Down Expand Up @@ -1331,15 +1335,14 @@ class NamespaceFS {

if (!same_inode && !part_upload) {
await this._move_to_dest(fs_context, upload_path, file_path, target_file, open_mode, params.key);
if (config.NSFS_TRIGGER_FSYNC) await nb_native().fs.fsync(fs_context, path.dirname(file_path));
}

// when object is a dir, xattr are set on the folder itself and the content is in .folder file
if (is_dir_content) {
if (params.copy_source) fs_xattr = await this._get_copy_source_xattr(params, fs_context, fs_xattr);
await this._assign_dir_content_to_xattr(fs_context, fs_xattr, { ...params, size: stat.size }, copy_xattr);
}
stat = await nb_native().fs.stat(fs_context, file_path);
stat.xattr = { ...stat.xattr, ...fs_xattr };
const upload_info = this._get_upload_info(stat, fs_xattr && fs_xattr[XATTR_VERSION_ID]);
return upload_info;
}
Expand Down Expand Up @@ -1392,6 +1395,7 @@ class NamespaceFS {
} else {
await this._move_to_dest_version(fs_context, source_path, dest_path, target_file, key, open_mode);
}
if (config.NSFS_TRIGGER_FSYNC) await nb_native().fs.fsync(fs_context, path.dirname(dest_path));
break;
} catch (err) {
retries -= 1;
Expand Down Expand Up @@ -1469,9 +1473,10 @@ class NamespaceFS {
break;
} catch (err) {
retries -= 1;
if (retries <= 0 || !native_fs_utils.should_retry_link_unlink(is_gpfs, err)) throw err;
dbg.warn(`NamespaceFS._move_to_dest_version retrying retries=${retries}` +
const should_retry = native_fs_utils.should_retry_link_unlink(is_gpfs, err);
dbg.warn(`NamespaceFS._move_to_dest_version retrying retries=${retries} should_retry=${should_retry}` +
` new_ver_tmp_path=${new_ver_tmp_path} latest_ver_path=${latest_ver_path}`, err);
if (!should_retry || retries <= 0) throw err;
} finally {
if (gpfs_options) await this._close_files_gpfs(fs_context, gpfs_options.move_to_dst, open_mode);
}
Expand Down Expand Up @@ -1962,9 +1967,9 @@ class NamespaceFS {
const stat = await file.stat(fs_context);
if (stat.xattr) {
for (const [xattr_key, xattr_value] of Object.entries(stat.xattr)) {
if (xattr_key.includes(XATTR_NOOBAA_CUSTOM_PREFIX)) {
if (xattr_key.includes(XATTR_TAG)) {
tag_set.push({
key: xattr_key.replace(XATTR_NOOBAA_CUSTOM_PREFIX, ''),
key: xattr_key.replace(XATTR_TAG, ''),
value: xattr_value,
});
}
Expand All @@ -1988,7 +1993,7 @@ class NamespaceFS {
}
const fs_context = this.prepare_fs_context(object_sdk);
try {
await this._clear_user_xattr(fs_context, file_path, XATTR_NOOBAA_CUSTOM_PREFIX);
await this._clear_user_xattr(fs_context, file_path, XATTR_TAG);
} catch (err) {
dbg.error(`NamespaceFS.delete_object_tagging: failed in dir ${file_path} with error: `, err);
throw native_fs_utils.translate_error_codes(err, native_fs_utils.entity_enum.OBJECT);
Expand All @@ -2000,7 +2005,7 @@ class NamespaceFS {
const fs_xattr = {};
const tagging = params.tagging && Object.fromEntries(params.tagging.map(tag => ([tag.key, tag.value])));
for (const [xattr_key, xattr_value] of Object.entries(tagging)) {
fs_xattr[XATTR_NOOBAA_CUSTOM_PREFIX + xattr_key] = xattr_value;
fs_xattr[XATTR_TAG + xattr_key] = xattr_value;
}
let file_path;
if (params.version_id && this._is_versioning_enabled()) {
Expand All @@ -2012,7 +2017,7 @@ class NamespaceFS {
dbg.log0('NamespaceFS.put_object_tagging: fs_xattr ', fs_xattr, 'file_path :', file_path);
try {
// remove existing tags before putting new tags
await this._clear_user_xattr(fs_context, file_path, XATTR_NOOBAA_CUSTOM_PREFIX);
await this._clear_user_xattr(fs_context, file_path, XATTR_TAG);
await this.set_fs_xattr_op(fs_context, file_path, fs_xattr, undefined);
} catch (err) {
dbg.error(`NamespaceFS.put_object_tagging: failed in dir ${file_path} with error: `, err);
Expand Down Expand Up @@ -2311,6 +2316,10 @@ class NamespaceFS {
return xattr[XATTR_MD5_KEY];
}

_number_of_tags_fs_xttr(xattr) {
return Object.keys(xattr).filter(xattr_key => xattr_key.includes(XATTR_TAG)).length;
}

/**
* @param {string} bucket
* @param {string} key
Expand All @@ -2331,6 +2340,7 @@ class NamespaceFS {
mime.getType(key) || 'application/octet-stream';
const storage_class = s3_utils.parse_storage_class(stat.xattr?.[XATTR_STORAGE_CLASS_KEY]);
const size = Number(stat.xattr?.[XATTR_DIR_CONTENT] || stat.size);
const tag_count = stat.xattr ? this._number_of_tags_fs_xttr(stat.xattr) : 0;

return {
obj_id: etag,
Expand All @@ -2347,9 +2357,9 @@ class NamespaceFS {
storage_class,
restore_status: GlacierBackend.get_restore_status(stat.xattr, new Date(), this._get_file_path({key})),
xattr: to_xattr(stat.xattr),
tag_count,

// temp:
tag_count: 0,
lock_settings: undefined,
md5_b64: undefined,
num_parts: undefined,
Expand Down
20 changes: 15 additions & 5 deletions src/sdk/namespace_s3.js
Original file line number Diff line number Diff line change
Expand Up @@ -217,11 +217,21 @@ class NamespaceS3 {
} catch (err) {
this._translate_error_code(params, err);
dbg.warn('NamespaceS3.read_object_md:', inspect(err));
object_sdk.rpc_client.pool.update_issues_report({
namespace_resource_id: this.namespace_resource_id,
error_code: String(err.code),
time: Date.now(),
});

// It's totally expected to issue `HeadObject` against an object that doesn't exist
// this shouldn't be counted as an issue for the namespace store
//
// @TODO: Another error to tolerate is 'InvalidObjectState'. This shouldn't also
// result in IO_ERROR for the namespace however that means we can not do `getObject`
// even when `can_use_get_inline` is true.
if (err.rpc_code !== 'NO_SUCH_OBJECT') {
object_sdk.rpc_client.pool.update_issues_report({
namespace_resource_id: this.namespace_resource_id,
error_code: String(err.code),
time: Date.now(),
});
}

throw err;
}
}
Expand Down
4 changes: 2 additions & 2 deletions src/server/bg_services/replication_server.js
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ async function copy_objects_mixed_types(req) {
if (keys_diff_map[key].length === 1) {
const params = {
Bucket: dst_bucket_name.unwrap(),
CopySource: `/${src_bucket_name.unwrap()}/${key}`, // encodeURI for special chars is needed
CopySource: encodeURI(`/${src_bucket_name.unwrap()}/${key}`),
Key: key
};
try {
Expand All @@ -96,7 +96,7 @@ async function copy_objects_mixed_types(req) {
const version = keys_diff_map[key][i].VersionId;
const params = {
Bucket: dst_bucket_name.unwrap(),
CopySource: `/${src_bucket_name.unwrap()}/${key}?versionId=${version}`, // encodeURI for special chars is needed
CopySource: encodeURI(`/${src_bucket_name.unwrap()}/${key}?versionId=${version}`),
Key: key
};
try {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -163,9 +163,6 @@ s3tests_boto3/functional/test_s3.py::test_multipart_upload_missing_part
s3tests_boto3/functional/test_s3.py::test_multipart_upload_incorrect_etag
s3tests_boto3/functional/test_s3.py::test_set_bucket_tagging
s3tests_boto3/functional/test_s3.py::test_atomic_dual_conditional_write_1mb
s3tests_boto3/functional/test_s3.py::test_versioning_obj_create_read_remove
s3tests_boto3/functional/test_s3.py::test_versioning_obj_create_versions_remove_all
s3tests_boto3/functional/test_s3.py::test_versioning_obj_create_versions_remove_special_names
s3tests_boto3/functional/test_s3.py::test_versioned_concurrent_object_create_concurrent_remove
s3tests_boto3/functional/test_s3.py::test_encrypted_transfer_1b
s3tests_boto3/functional/test_s3.py::test_encrypted_transfer_1kb
Expand All @@ -180,7 +177,6 @@ s3tests_boto3/functional/test_s3.py::test_bucketv2_policy
s3tests_boto3/functional/test_s3.py::test_bucket_policy_another_bucket
s3tests_boto3/functional/test_s3.py::test_bucketv2_policy_another_bucket
s3tests_boto3/functional/test_s3.py::test_get_obj_tagging
s3tests_boto3/functional/test_s3.py::test_get_obj_head_tagging
s3tests_boto3/functional/test_s3.py::test_put_max_tags
s3tests_boto3/functional/test_s3.py::test_bucket_policy_put_obj_s3_noenc
s3tests_boto3/functional/test_s3.py::test_copy_object_ifmatch_failed
Expand Down
72 changes: 72 additions & 0 deletions src/test/unit_tests/jest_tests/test_nsfs_concurrency.test.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
/* Copyright (C) 2016 NooBaa */
'use strict';

const path = require('path');
const P = require('../../../util/promise');
const fs_utils = require('../../../util/fs_utils');
const NamespaceFS = require('../../../sdk/namespace_fs');
const buffer_utils = require('../../../util/buffer_utils');
const { TMP_PATH } = require('../../system_tests/test_utils');
const { crypto_random_string } = require('../../../util/string_utils');
const endpoint_stats_collector = require('../../../sdk/endpoint_stats_collector');

/**
 * Builds a minimal object_sdk stub for driving NamespaceFS calls in unit tests.
 * @param {boolean} nsfs_config - when falsy, nsfs_account_config is left falsy (no account config)
 * @param {number} [uid] - account uid; defaults to the current process uid
 * @param {number} [gid] - account gid; defaults to the current process gid
 * @returns {object} stub with requesting_account, abort_controller and throw_if_aborted()
 */
function make_dummy_object_sdk(nsfs_config, uid, gid) {
    // only build the account config when the caller asked for one
    const nsfs_account_config = nsfs_config && {
        uid: uid || process.getuid(),
        gid: gid || process.getgid(),
        backend: '',
    };
    return {
        requesting_account: { nsfs_account_config },
        abort_controller: new AbortController(),
        // mirrors object_sdk's abort check: throw once the signal has fired
        throw_if_aborted() {
            if (this.abort_controller.signal.aborted) throw new Error('request aborted signal');
        },
    };
}

const DUMMY_OBJECT_SDK = make_dummy_object_sdk(true);
describe('test nsfs concurrency', () => {
    const tmp_fs_path = path.join(TMP_PATH, 'test_nsfs_concurrency');

    const nsfs = new NamespaceFS({
        bucket_path: tmp_fs_path,
        bucket_id: '1',
        namespace_resource_id: undefined,
        access_mode: undefined,
        versioning: 'DISABLED',
        force_md5_etag: false,
        stats: endpoint_stats_collector.instance(),
    });

    beforeEach(async () => {
        await fs_utils.create_fresh_path(tmp_fs_path);
    });

    afterEach(async () => {
        await fs_utils.folder_delete(tmp_fs_path);
    });

    it('multiple puts of the same nested key', async () => {
        const bucket = 'bucket1';
        const key = 'dir1/key1';
        const res_etags = [];
        const uploads = [];
        for (let i = 0; i < 15; i++) {
            const random_data = Buffer.from(String(crypto_random_string(7)));
            const body = buffer_utils.buffer_to_read_stream(random_data);
            // Deliberately do NOT await the upload here — the point of the test is
            // to race puts against deletes on the same key. Keep the promise so we
            // can deterministically wait for (and surface failures from) every upload,
            // instead of sleeping for a fixed delay and leaving rejections unhandled.
            uploads.push(
                nsfs.upload_object({ bucket: bucket, key: key, source_stream: body }, DUMMY_OBJECT_SDK)
                    .then(res => {
                        console.log('upload res', res);
                        res_etags.push(res.etag);
                    })
                    .catch(err => {
                        console.log('put the same key error - ', err);
                        throw err;
                    })
            );
            await nsfs.delete_object({ bucket: bucket, key: key }, DUMMY_OBJECT_SDK)
                .catch(err => console.log('delete the same key error - ', err));
        }
        // wait for all in-flight uploads; any upload failure now fails the test
        await Promise.all(uploads);
        expect(res_etags).toHaveLength(15);
    }, 6000);
});
Loading

0 comments on commit 2254a6b

Please sign in to comment.