diff --git a/src/endpoint/s3/s3_errors.js b/src/endpoint/s3/s3_errors.js
index 82446ab2fe..6a31359a98 100644
--- a/src/endpoint/s3/s3_errors.js
+++ b/src/endpoint/s3/s3_errors.js
@@ -609,6 +609,7 @@ S3Error.RPC_ERRORS_TO_S3 = Object.freeze({
NO_SUCH_TAG: S3Error.NoSuchTagSet,
INVALID_ENCODING_TYPE: S3Error.InvalidEncodingType,
INVALID_TARGET_BUCKET: S3Error.InvalidTargetBucketForLogging,
+ METHOD_NOT_ALLOWED: S3Error.MethodNotAllowed,
});
exports.S3Error = S3Error;
diff --git a/src/endpoint/s3/s3_rest.js b/src/endpoint/s3/s3_rest.js
index 0c81e60f9c..96a5a4a01f 100755
--- a/src/endpoint/s3/s3_rest.js
+++ b/src/endpoint/s3/s3_rest.js
@@ -383,7 +383,7 @@ function parse_op_name(req) {
return `${method}_object`;
}
-function handle_error(req, res, err) {
+function _prepare_error(req, res, err) {
let s3err =
((err instanceof S3Error) && err) ||
new S3Error(S3Error.RPC_ERRORS_TO_S3[err.rpc_code] || S3Error.InternalError);
@@ -393,19 +393,26 @@ function handle_error(req, res, err) {
s3err.detail = err.rpc_data.detail;
}
- if (s3err.rpc_data) {
- if (s3err.rpc_data.etag) {
+ if (err.rpc_data) {
+ if (err.rpc_data.etag) {
if (res.headersSent) {
dbg.log0('Sent reply in body, bit too late for Etag header');
} else {
- res.setHeader('ETag', s3err.rpc_data.etag);
+ res.setHeader('ETag', err.rpc_data.etag);
}
}
- if (s3err.rpc_data.last_modified) {
+ if (err.rpc_data.last_modified) {
if (res.headersSent) {
dbg.log0('Sent reply in body, bit too late for Last-Modified header');
} else {
- res.setHeader('Last-Modified', time_utils.format_http_header_date(new Date(s3err.rpc_data.last_modified)));
+ res.setHeader('Last-Modified', time_utils.format_http_header_date(new Date(err.rpc_data.last_modified)));
+ }
+ }
+ if (err.rpc_data.delete_marker) {
+ if (res.headersSent) {
+ dbg.log0('Sent reply in body, bit too late for x-amz-delete-marker header');
+ } else {
+ res.setHeader('x-amz-delete-marker', String(err.rpc_data.delete_marker));
}
}
}
@@ -418,6 +425,12 @@ function handle_error(req, res, err) {
usage_report.s3_errors_info.total_errors += 1;
usage_report.s3_errors_info[s3err.code] = (usage_report.s3_errors_info[s3err.code] || 0) + 1;
+ return s3err;
+}
+
+function handle_error(req, res, err) {
+ const s3err = _prepare_error(req, res, err);
+
const reply = s3err.reply(req.originalUrl, req.request_id);
dbg.error('S3 ERROR', reply,
req.method, req.originalUrl,
@@ -436,26 +449,7 @@ function handle_error(req, res, err) {
}
async function _handle_html_response(req, res, err) {
- let s3err =
- ((err instanceof S3Error) && err) ||
- new S3Error(S3Error.RPC_ERRORS_TO_S3[err.rpc_code] || S3Error.InternalError);
-
- if (s3err.rpc_data) {
- if (s3err.rpc_data.etag) {
- res.setHeader('ETag', s3err.rpc_data.etag);
- }
- if (s3err.rpc_data.last_modified) {
- res.setHeader('Last-Modified', time_utils.format_http_header_date(new Date(s3err.rpc_data.last_modified)));
- }
- }
-
- // md_conditions used for PUT/POST/DELETE should return PreconditionFailed instead of NotModified
- if (s3err.code === 'NotModified' && req.method !== 'HEAD' && req.method !== 'GET') {
- s3err = new S3Error(S3Error.PreconditionFailed);
- }
-
- usage_report.s3_errors_info.total_errors += 1;
- usage_report.s3_errors_info[s3err.code] = (usage_report.s3_errors_info[s3err.code] || 0) + 1;
+ const s3err = _prepare_error(req, res, err);
const reply = ` \
${s3err.http_code} ${s3err.code} \
diff --git a/src/sdk/namespace_fs.js b/src/sdk/namespace_fs.js
index f876307cce..8222680b33 100644
--- a/src/sdk/namespace_fs.js
+++ b/src/sdk/namespace_fs.js
@@ -59,7 +59,6 @@ const XATTR_PART_OFFSET = XATTR_NOOBAA_INTERNAL_PREFIX + 'part_offset';
const XATTR_PART_SIZE = XATTR_NOOBAA_INTERNAL_PREFIX + 'part_size';
const XATTR_PART_ETAG = XATTR_NOOBAA_INTERNAL_PREFIX + 'part_etag';
const XATTR_VERSION_ID = XATTR_NOOBAA_INTERNAL_PREFIX + 'version_id';
-const XATTR_PREV_VERSION_ID = XATTR_NOOBAA_INTERNAL_PREFIX + 'prev_version_id';
const XATTR_DELETE_MARKER = XATTR_NOOBAA_INTERNAL_PREFIX + 'delete_marker';
const XATTR_DIR_CONTENT = XATTR_NOOBAA_INTERNAL_PREFIX + 'dir_content';
const XATTR_TAG = XATTR_NOOBAA_INTERNAL_PREFIX + 'tag.';
@@ -483,7 +482,7 @@ class NamespaceFS {
}
/**
- * @param {nb.ObjectSDK} object_sdk
+ * @param {nb.ObjectSDK} object_sdk
* @returns {nb.NativeFSContext}
*/
prepare_fs_context(object_sdk) {
@@ -943,7 +942,7 @@ class NamespaceFS {
stat = { ...dir_content_path_stat, xattr };
}
}
- this._throw_if_delete_marker(stat);
+ this._throw_if_delete_marker(stat, params);
return this._get_object_info(params.bucket, params.key, stat, isDir);
} catch (err) {
if (this._should_update_issues_report(params, file_path, err)) {
@@ -989,7 +988,7 @@ class NamespaceFS {
);
const stat = await file.stat(fs_context);
- this._throw_if_delete_marker(stat);
+ this._throw_if_delete_marker(stat, params);
// await this._fail_if_archived_or_sparse_file(fs_context, file_path, stat);
const start = Number(params.start) || 0;
@@ -1090,7 +1089,9 @@ class NamespaceFS {
// end the stream
res.end();
- await stream_utils.wait_finished(res, { signal: object_sdk.abort_controller.signal });
+ // in case of transform streams such as ChunkFS there is also a readable side. since we expect a write stream
+ // and do not care about the readable side, set readable: false
+ await stream_utils.wait_finished(res, { readable: false, signal: object_sdk.abort_controller.signal });
object_sdk.throw_if_aborted();
dbg.log0('NamespaceFS: read_object_stream completed file', file_path, {
@@ -1209,9 +1210,7 @@ class NamespaceFS {
}
if (copy_res) {
- if (copy_res === copy_status_enum.FALLBACK) {
- params.copy_source.nsfs_copy_fallback();
- } else {
+ if (copy_res !== copy_status_enum.FALLBACK) {
// open file after copy link/same inode should use read open mode
open_mode = config.NSFS_OPEN_READ_MODE;
if (copy_res === copy_status_enum.SAME_INODE) open_path = file_path;
@@ -1294,10 +1293,8 @@ class NamespaceFS {
const stat = await target_file.stat(fs_context);
this._verify_encryption(params.encryption, this._get_encryption_info(stat));
- // handle xattr
- // assign user xattr on non copy / copy with xattr_copy header provided
const copy_xattr = params.copy_source && params.xattr_copy;
- let fs_xattr = copy_xattr ? undefined : to_fs_xattr(params.xattr);
+ let fs_xattr = to_fs_xattr(params.xattr);
// assign noobaa internal xattr - content type, md5, versioning xattr
if (params.content_type) {
@@ -1316,8 +1313,7 @@ class NamespaceFS {
fs_xattr = await this._assign_part_props_to_fs_xattr(fs_context, params.size, digest, offset, fs_xattr);
}
if (!part_upload && (this._is_versioning_enabled() || this._is_versioning_suspended())) {
- const cur_ver_info = await this._get_version_info(fs_context, file_path);
- fs_xattr = await this._assign_versions_to_fs_xattr(fs_context, cur_ver_info, stat, params.key, fs_xattr);
+ fs_xattr = await this._assign_versions_to_fs_xattr(stat, fs_xattr, undefined);
}
if (!part_upload && params.storage_class) {
fs_xattr = Object.assign(fs_xattr || {}, {
@@ -1339,7 +1335,6 @@ class NamespaceFS {
// when object is a dir, xattr are set on the folder itself and the content is in .folder file
if (is_dir_content) {
- if (params.copy_source) fs_xattr = await this._get_copy_source_xattr(params, fs_context, fs_xattr);
await this._assign_dir_content_to_xattr(fs_context, fs_xattr, { ...params, size: stat.size }, copy_xattr);
}
stat.xattr = { ...stat.xattr, ...fs_xattr };
@@ -1351,34 +1346,21 @@ class NamespaceFS {
await native_fs_utils._make_path_dirs(file_path, fs_context);
const copy_xattr = params.copy_source && params.xattr_copy;
- let fs_xattr = copy_xattr ? {} : to_fs_xattr(params.xattr) || {};
+ let fs_xattr = to_fs_xattr(params.xattr) || {};
if (params.content_type) {
fs_xattr = fs_xattr || {};
fs_xattr[XATTR_CONTENT_TYPE] = params.content_type;
}
- if (params.copy_source) fs_xattr = await this._get_copy_source_xattr(params, fs_context, fs_xattr);
await this._assign_dir_content_to_xattr(fs_context, fs_xattr, params, copy_xattr);
// when .folder exist and it's no upload flow - .folder should be deleted if it exists
- try {
- await nb_native().fs.unlink(fs_context, file_path);
- } catch (err) {
- if (err.code !== 'ENOENT') throw err;
- dbg.log0(`namespace_fs._create_empty_dir_content: dir object file ${config.NSFS_FOLDER_OBJECT_NAME} was already deleted`);
- }
+ await native_fs_utils.unlink_ignore_enoent(fs_context, file_path);
const dir_path = this._get_file_md_path(params);
const stat = await nb_native().fs.stat(fs_context, dir_path);
const upload_info = this._get_upload_info(stat, fs_xattr[XATTR_VERSION_ID]);
return upload_info;
}
- async _get_copy_source_xattr(params, fs_context, fs_xattr) {
- const is_source_dir = params.copy_source.key.endsWith('/');
- const source_file_md_path = await this._find_version_path(fs_context, params.copy_source, is_source_dir);
- const source_stat = await nb_native().fs.stat(fs_context, source_file_md_path);
- return { ...source_stat.xattr, ...fs_xattr };
- }
-
// move to dest GPFS (wt) / POSIX (w / undefined) - non part upload
async _move_to_dest(fs_context, source_path, dest_path, target_file, open_mode, key) {
let retries = config.NSFS_RENAME_RETRIES;
@@ -1447,7 +1429,8 @@ class NamespaceFS {
if (this._is_versioning_suspended()) {
if (latest_ver_info?.version_id_str === NULL_VERSION_ID) {
dbg.log1('NamespaceFS._move_to_dest_version suspended: version ID of the latest version is null - the file will be unlinked');
- await native_fs_utils.safe_unlink(fs_context, latest_ver_path, latest_ver_info, gpfs_options, bucket_tmp_dir_path);
+ await native_fs_utils.safe_unlink(fs_context, latest_ver_path, latest_ver_info,
+ gpfs_options?.delete_version, bucket_tmp_dir_path);
} else {
// remove a version (or delete marker) with null version ID from .versions/ (if exists)
await this._delete_null_version_from_versions_directory(key, fs_context);
@@ -1511,7 +1494,7 @@ class NamespaceFS {
// Can be finetuned further on if needed and inserting the Semaphore logic inside
// Instead of wrapping the whole _upload_stream function (q_buffers lives outside of the data scope of the stream)
async _upload_stream({ fs_context, params, target_file, object_sdk, offset }) {
- const { source_stream } = params;
+ const { source_stream, copy_source } = params;
try {
// Not using async iterators with ReadableStreams due to unsettled promises issues on abort/destroy
const md5_enabled = this._is_force_md5_enabled(object_sdk);
@@ -1526,8 +1509,14 @@ class NamespaceFS {
large_buf_size: multi_buffer_pool.get_buffers_pool(undefined).buf_size
});
chunk_fs.on('error', err1 => dbg.error('namespace_fs._upload_stream: error occured on stream ChunkFS: ', err1));
- await stream_utils.pipeline([source_stream, chunk_fs]);
- await stream_utils.wait_finished(chunk_fs);
+ if (copy_source) {
+ await this.read_object_stream(copy_source, object_sdk, chunk_fs);
+ } else if (params.source_params) {
+ await params.source_ns.read_object_stream(params.source_params, object_sdk, chunk_fs);
+ } else {
+ await stream_utils.pipeline([source_stream, chunk_fs]);
+ await stream_utils.wait_finished(chunk_fs);
+ }
return { digest: chunk_fs.digest, total_bytes: chunk_fs.total_bytes };
} catch (error) {
dbg.error('_upload_stream had error: ', error);
@@ -1813,6 +1802,7 @@ class NamespaceFS {
upload_params.params.xattr = create_params_parsed.xattr;
upload_params.params.storage_class = create_params_parsed.storage_class;
upload_params.digest = MD5Async && (((await MD5Async.digest()).toString('hex')) + '-' + multiparts.length);
+ upload_params.params.content_type = create_params_parsed.content_type;
const upload_info = await this._finish_upload(upload_params);
@@ -1920,11 +1910,7 @@ class NamespaceFS {
async _delete_single_object(fs_context, file_path, params) {
- try {
- await nb_native().fs.unlink(fs_context, file_path);
- } catch (err) {
- if (err.code !== 'ENOENT') throw err;
- }
+ await native_fs_utils.unlink_ignore_enoent(fs_context, file_path);
await this._delete_path_dirs(file_path, fs_context);
// when deleting the data of a directory object, we need to remove the directory dir object xattr
// if the dir still exists - occurs when deleting dir while the dir still has entries in it
@@ -2188,14 +2174,11 @@ class NamespaceFS {
return fs_xattr;
}
- async _assign_versions_to_fs_xattr(fs_context, prev_ver_info, new_ver_stat, key, fs_xattr, delete_marker) {
- if (!prev_ver_info) prev_ver_info = await this.find_max_version_past(fs_context, key);
-
+ async _assign_versions_to_fs_xattr(new_ver_stat, fs_xattr, delete_marker) {
fs_xattr = Object.assign(fs_xattr || {}, {
[XATTR_VERSION_ID]: this._get_version_id_by_mode(new_ver_stat)
});
- if (prev_ver_info) fs_xattr[XATTR_PREV_VERSION_ID] = prev_ver_info.version_id_str;
if (delete_marker) fs_xattr[XATTR_DELETE_MARKER] = delete_marker;
return fs_xattr;
@@ -2674,7 +2657,6 @@ class NamespaceFS {
// mtimeNsBigint - modified timestmap in bigint - last time the content of the file was modified
// ino - refers to the data stored in a particular location
// delete_marker - specifies if the version is a delete marker
- // prev_version_id - specifies the previous version of the wanted version
// path - specifies the path to version
// if version xattr contains version info - return info by xattr
// else - it's a null version - return stat
@@ -2689,7 +2671,6 @@ class NamespaceFS {
...(ver_info_by_xattr || stat),
version_id_str,
delete_marker: stat.xattr[XATTR_DELETE_MARKER],
- prev_version_id: stat.xattr[XATTR_PREV_VERSION_ID],
path: version_path
};
} catch (err) {
@@ -2714,11 +2695,19 @@ class NamespaceFS {
return versioned_path;
}
- _throw_if_delete_marker(stat) {
+ _throw_if_delete_marker(stat, params) {
if (this.versioning === versioning_status_enum.VER_ENABLED || this.versioning === versioning_status_enum.VER_SUSPENDED) {
const xattr_delete_marker = stat.xattr[XATTR_DELETE_MARKER];
if (xattr_delete_marker) {
- throw error_utils.new_error_code('ENOENT', 'Entry is a delete marker');
+ const basic_err = error_utils.new_error_code('ENOENT', 'Entry is a delete marker');
+ if (params.version_id) {
+ // If the specified version in the request is a delete marker,
+ // the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header.
+ throw new RpcError('METHOD_NOT_ALLOWED',
+ 'Method not allowed, delete object id of entry delete marker',
+ { last_modified: new Date(stat.mtime), delete_marker: true });
+ }
+ throw basic_err;
}
}
}
@@ -2737,13 +2726,12 @@ class NamespaceFS {
}
/**
- * @param {nb.NativeFSContext} fs_context
- * @param {string} key
- * @param {string} version_id
+ * @param {nb.NativeFSContext} fs_context
+ * @param {string} key
+ * @param {string} version_id
* @returns {Promise<{
* version_id_str: any;
* delete_marker: string;
- * prev_version_id: string;
* path: any;
* mtimeNsBigint: bigint;
* ino: number;
@@ -2770,10 +2758,11 @@ class NamespaceFS {
await this._open_files_gpfs(fs_context, file_path, undefined, undefined, undefined, undefined, true) :
undefined;
const bucket_tmp_dir_path = this.get_bucket_tmpdir_full_path();
- await native_fs_utils.safe_unlink(fs_context, file_path, version_info, gpfs_options, bucket_tmp_dir_path);
+ await native_fs_utils.safe_unlink(fs_context, file_path, version_info,
+ gpfs_options?.delete_version, bucket_tmp_dir_path);
return { ...version_info, latest: true };
} else {
- await nb_native().fs.unlink(fs_context, file_path);
+ await native_fs_utils.unlink_ignore_enoent(fs_context, file_path);
}
return version_info;
} catch (err) {
@@ -2862,17 +2851,13 @@ class NamespaceFS {
// condition 2 guards on situations where we don't want to try move max version past to latest
async _promote_version_to_latest(fs_context, params, deleted_version_info, latest_ver_path) {
dbg.log1('Namespace_fs._promote_version_to_latest', params, deleted_version_info, latest_ver_path);
- const deleted_latest = deleted_version_info && deleted_version_info.path === latest_ver_path;
- const prev_version_id = deleted_latest && deleted_version_info.prev_version_id;
let retries = config.NSFS_RENAME_RETRIES;
for (;;) {
try {
const latest_version_info = await this._get_version_info(fs_context, latest_ver_path);
if (latest_version_info) return;
- const max_past_ver_info = (prev_version_id &&
- (await this.get_prev_version_info(fs_context, params.key, prev_version_id))) ||
- (await this.find_max_version_past(fs_context, params.key));
+ const max_past_ver_info = await this.find_max_version_past(fs_context, params.key);
if (!max_past_ver_info || max_past_ver_info.delete_marker) return;
// 2 - if deleted file is a delete marker and is older than max past version - no need to promote max - return
@@ -2926,13 +2911,13 @@ class NamespaceFS {
const versioned_path = latest_ver_info && this._get_version_path(params.key, latest_ver_info.version_id_str);
const versioned_info = latest_ver_info && await this._get_version_info(fs_context, versioned_path);
- gpfs_options = is_gpfs ?
+ dbg.log1('Namespace_fs._delete_latest_version:', latest_ver_info, versioned_path, versioned_info);
+ if (latest_ver_info) {
+ gpfs_options = is_gpfs ?
await this._open_files_gpfs(fs_context, latest_ver_path,
undefined, undefined, undefined, undefined, true, versioned_info) :
undefined;
- dbg.log1('Namespace_fs._delete_latest_version:', latest_ver_info, versioned_path, versioned_info, gpfs_options);
- if (latest_ver_info) {
const suspended_and_latest_is_not_null = this._is_versioning_suspended() &&
latest_ver_info.version_id_str !== NULL_VERSION_ID;
const bucket_tmp_dir_path = this.get_bucket_tmpdir_full_path();
@@ -2947,7 +2932,8 @@ class NamespaceFS {
} else {
// versioning suspended and version_id is null
dbg.log1('NamespaceFS._delete_latest_version: suspended mode version ID of the latest version is null - file will be unlinked');
- await native_fs_utils.safe_unlink(fs_context, latest_ver_path, latest_ver_info, gpfs_options, bucket_tmp_dir_path);
+ await native_fs_utils.safe_unlink(fs_context, latest_ver_path, latest_ver_info,
+ gpfs_options?.delete_version, bucket_tmp_dir_path);
}
}
break;
@@ -3018,16 +3004,7 @@ class NamespaceFS {
}
const file_path = this._get_version_path(params.key, delete_marker_version_id);
- let fs_xattr;
- if (this._is_versioning_suspended() &&
- (deleted_version_info?.version_id_str === NULL_VERSION_ID)) {
- fs_xattr = await this._assign_versions_to_fs_xattr(fs_context, undefined,
- stat, params.key, undefined, true);
- } else {
- // the previous version will be the deleted version
- fs_xattr = await this._assign_versions_to_fs_xattr(fs_context, deleted_version_info,
- stat, params.key, undefined, true);
- }
+ const fs_xattr = await this._assign_versions_to_fs_xattr(stat, undefined, true);
if (fs_xattr) await upload_params.target_file.replacexattr(fs_context, fs_xattr);
// create .version in case we don't have it yet
await native_fs_utils._make_path_dirs(file_path, fs_context);
@@ -3047,12 +3024,6 @@ class NamespaceFS {
}
}
- async get_prev_version_info(fs_context, key, prev_version_id) {
- const prev_path = this._get_version_path(key, prev_version_id);
- const prev_path_info = await this._get_version_info(fs_context, prev_path);
- return prev_path_info;
- }
-
// try find prev version by hint or by iterating on .versions/ dir
async find_max_version_past(fs_context, key) {
const versions_dir = path.normalize(path.join(this.bucket_path, path.dirname(key), HIDDEN_VERSIONS_PATH));
diff --git a/src/sdk/object_sdk.js b/src/sdk/object_sdk.js
index c57efd5a8c..594cb352d1 100644
--- a/src/sdk/object_sdk.js
+++ b/src/sdk/object_sdk.js
@@ -106,8 +106,8 @@ class ObjectSDK {
* in order to handle aborting requests gracefully. The `abort_controller` member will
* be used to signal async flows that abort was detected.
* @see {@link https://nodejs.org/docs/latest/api/globals.html#class-abortcontroller}
- * @param {import('http').IncomingMessage} req
- * @param {import('http').ServerResponse} res
+ * @param {import('http').IncomingMessage} req
+ * @param {import('http').ServerResponse} res
*/
setup_abort_controller(req, res) {
res.once('error', err => {
@@ -158,7 +158,7 @@ class ObjectSDK {
}
/**
- * @param {string} name
+ * @param {string} name
* @returns {Promise}
*/
async _get_bucket_namespace(name) {
@@ -268,7 +268,7 @@ class ObjectSDK {
return Boolean(fs_root_path || fs_root_path === '');
}
- // validates requests for non nsfs buckets from accounts which are nsfs_only
+ // validates requests for non nsfs buckets from accounts which are nsfs_only
has_non_nsfs_bucket_access(account, ns) {
dbg.log1('validate_non_nsfs_bucket: ', account, ns?.write_resource?.resource);
if (!account) return false;
@@ -524,7 +524,7 @@ class ObjectSDK {
/**
* Calls the op and report time and error to stats collector.
* on_success can be added to update read/write stats (but on_success shouln't throw)
- *
+ *
* @template T
* @param {{
* op_name: string;
@@ -642,7 +642,9 @@ class ObjectSDK {
params.content_type = source_md.content_type;
}
try {
- if (params.xattr) params.xattr = _.omitBy(params.xattr, (val, name) => name.startsWith('noobaa-namespace'));
+//omitBy iterates over all xattr keys and calls startsWith on each one. the keys can include symbols such as XATTR_SORT_SYMBOL,
+//which have no startsWith method, so optional chaining skips them instead of throwing
+ if (params.xattr) params.xattr = _.omitBy(params.xattr, (val, name) => name.startsWith?.('noobaa-namespace'));
} catch (e) {
dbg.log3("Got an error while trying to omitBy param.xattr:", params.xattr, "error:", e);
}
@@ -658,12 +660,6 @@ class ObjectSDK {
params.copy_source.bucket = actual_source_ns.get_bucket(bucket);
params.copy_source.obj_id = source_md.obj_id;
params.copy_source.version_id = source_md.version_id;
- if (source_ns instanceof NamespaceFS) {
- params.copy_source.nsfs_copy_fallback = () => {
- this._populate_nsfs_copy_fallback({ source_params, source_ns, params });
- params.copy_source = null;
- };
- }
} else {
// source cannot be copied directly (different plaforms, accounts, etc.)
// set the source_stream to read from the copy source
@@ -671,6 +667,7 @@ class ObjectSDK {
source_params.object_md = source_md;
source_params.obj_id = source_md.obj_id;
source_params.version_id = source_md.version_id;
+ source_params.bucket = actual_source_ns.get_bucket(bucket);
// param size is needed when doing an upload. Can be overrided during ranged writes
params.size = source_md.size;
@@ -684,7 +681,13 @@ class ObjectSDK {
// if the source namespace is NSFS then we need to pass the read_object_stream the read_stream
if (source_ns instanceof NamespaceFS) {
- this._populate_nsfs_copy_fallback({ source_params, source_ns, params });
+ if (target_ns instanceof NamespaceFS) {
+ params.source_ns = actual_source_ns;
+ params.source_params = source_params;
+ } else {
+ //this._populate_nsfs_copy_fallback({ source_params, source_ns, params });
+ throw new Error('TODO fix _populate_nsfs_copy_fallback');
+ }
} else {
params.source_stream = await source_ns.read_object_stream(source_params, this);
}
@@ -701,9 +704,9 @@ class ObjectSDK {
}
}
- // nsfs copy_object & server side copy consisted of link and a fallback to
+ // nsfs copy_object & server side copy consists of a link attempt and a fallback to
// read stream and then upload stream
- // nsfs copy object when can't server side copy - fallback directly
+ // nsfs copy object falls back directly when server side copy is not possible
_populate_nsfs_copy_fallback({ source_ns, params, source_params }) {
const read_stream = new stream.PassThrough();
source_ns.read_object_stream(source_params, this, read_stream)
diff --git a/src/test/system_tests/ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_black_list.txt b/src/test/system_tests/ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_black_list.txt
index 523cb97d1f..41959593eb 100644
--- a/src/test/system_tests/ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_black_list.txt
+++ b/src/test/system_tests/ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_black_list.txt
@@ -349,4 +349,61 @@ s3tests_boto3/functional/test_sts.py::test_assume_role_with_web_identity_resourc
s3tests_boto3/functional/test_sts.py::test_assume_role_with_web_identity_wrong_resource_tag_deny
s3tests_boto3/functional/test_sts.py::test_assume_role_with_web_identity_resource_tag_princ_tag
s3tests_boto3/functional/test_sts.py::test_assume_role_with_web_identity_resource_tag_copy_obj
-s3tests_boto3/functional/test_sts.py::test_assume_role_with_web_identity_role_resource_tag
\ No newline at end of file
+s3tests_boto3/functional/test_sts.py::test_assume_role_with_web_identity_role_resource_tag
+s3tests_boto3/functional/test_s3.py::test_post_object_invalid_signature
+s3tests_boto3/functional/test_s3.py::test_post_object_invalid_access_key
+s3tests_boto3/functional/test_s3.py::test_post_object_missing_policy_condition
+s3tests_boto3/functional/test_s3.py::test_post_object_request_missing_policy_specified_field
+s3tests_boto3/functional/test_s3.py::test_post_object_expired_policy
+s3tests_boto3/functional/test_s3.py::test_post_object_invalid_request_field_value
+s3tests_boto3/functional/test_s3.py::test_post_object_authenticated_request_bad_access_key
+s3tests_boto3/functional/test_s3.py::test_sse_s3_default_post_object_authenticated_request
+s3tests_boto3/functional/test_s3.py::test_sse_kms_default_post_object_authenticated_request
+s3tests_boto3/functional/test_s3.py::test_post_object_upload_size_rgw_chunk_size_bug
+s3tests_boto3/functional/test_s3.py::test_post_object_wrong_bucket
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_versioning_enabled
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_tags2
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_versioned_tags2
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_noncur_tags1
+s3tests_boto3/functional/test_s3.py::test_lifecycle_set_date
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_put
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_head
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_tags_head
+s3tests_boto3/functional/test_s3.py::test_lifecycle_transition_set_invalid_date
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_newer_noncurrent
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_size_gt
+s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_size_lt
+s3tests_boto3/functional/test_s3.py::test_object_lock_put_obj_lock_invalid_mode
+s3tests_boto3/functional/test_s3.py::test_object_lock_get_obj_retention_iso8601
+s3tests_boto3/functional/test_s3.py::test_object_lock_delete_object_with_retention_and_marker
+s3tests_boto3/functional/test_s3.py::test_object_lock_multi_delete_object_with_retention
+s3tests_boto3/functional/test_s3.py::test_object_lock_put_legal_hold
+s3tests_boto3/functional/test_s3.py::test_object_lock_get_legal_hold
+s3tests_boto3/functional/test_s3.py::test_object_lock_changing_mode_from_governance_with_bypass
+s3tests_boto3/functional/test_s3.py::test_object_lock_changing_mode_from_governance_without_bypass
+s3tests_boto3/functional/test_s3.py::test_object_lock_changing_mode_from_compliance
+s3tests_boto3/functional/test_s3.py::test_object_lock_delete_multipart_object_with_retention
+s3tests_boto3/functional/test_s3.py::test_object_lock_delete_multipart_object_with_legal_hold_on
+s3tests_boto3/functional/test_s3.py::test_sse_kms_method_head
+s3tests_boto3/functional/test_s3.py::test_sse_s3_default_upload_1b
+s3tests_boto3/functional/test_s3.py::test_sse_s3_default_upload_1kb
+s3tests_boto3/functional/test_s3.py::test_sse_s3_default_upload_1mb
+s3tests_boto3/functional/test_s3.py::test_sse_s3_default_upload_8mb
+s3tests_boto3/functional/test_s3.py::test_sse_kms_default_upload_1b
+s3tests_boto3/functional/test_s3.py::test_sse_kms_default_upload_1kb
+s3tests_boto3/functional/test_s3.py::test_sse_kms_default_upload_1mb
+s3tests_boto3/functional/test_s3.py::test_sse_kms_default_upload_8mb
+s3tests_boto3/functional/test_s3.py::test_sse_s3_default_method_head
+s3tests_boto3/functional/test_s3.py::test_sse_s3_default_multipart_upload
+s3tests_boto3/functional/test_s3.py::test_sse_s3_encrypted_upload_1b
+s3tests_boto3/functional/test_s3.py::test_sse_s3_encrypted_upload_1kb
+s3tests_boto3/functional/test_s3.py::test_sse_s3_encrypted_upload_1mb
+s3tests_boto3/functional/test_s3.py::test_sse_s3_encrypted_upload_8mb
+s3tests_boto3/functional/test_s3.py::test_cors_presigned_get_object
+s3tests_boto3/functional/test_s3.py::test_cors_presigned_get_object_tenant
+s3tests_boto3/functional/test_s3.py::test_cors_presigned_put_object
+s3tests_boto3/functional/test_s3.py::test_cors_presigned_put_object_with_acl
+s3tests_boto3/functional/test_s3.py::test_cors_presigned_put_object_tenant
+s3tests_boto3/functional/test_s3.py::test_cors_presigned_put_object_tenant_with_acl
+
diff --git a/src/test/system_tests/ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_pending_list.txt b/src/test/system_tests/ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_pending_list.txt
index 0407bb7923..ab191c9776 100644
--- a/src/test/system_tests/ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_pending_list.txt
+++ b/src/test/system_tests/ceph_s3_tests/s3-tests-lists/nsfs_s3_tests_pending_list.txt
@@ -9,47 +9,21 @@ s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_basic
s3tests_boto3/functional/test_s3.py::test_bucket_list_prefix_delimiter_basic
s3tests_boto3/functional/test_s3.py::test_account_usage
s3tests_boto3/functional/test_s3.py::test_head_bucket_usage
-s3tests_boto3/functional/test_s3.py::test_post_object_invalid_signature
-s3tests_boto3/functional/test_s3.py::test_post_object_invalid_access_key
-s3tests_boto3/functional/test_s3.py::test_post_object_missing_policy_condition
-s3tests_boto3/functional/test_s3.py::test_post_object_request_missing_policy_specified_field
-s3tests_boto3/functional/test_s3.py::test_post_object_expired_policy
-s3tests_boto3/functional/test_s3.py::test_post_object_invalid_request_field_value
s3tests_boto3/functional/test_s3.py::test_lifecycle_get_no_id
-s3tests_boto3/functional/test_s3.py::test_object_lock_put_obj_lock_invalid_mode
-s3tests_boto3/functional/test_s3.py::test_object_lock_get_obj_retention_iso8601
-s3tests_boto3/functional/test_s3.py::test_object_lock_delete_object_with_retention_and_marker
s3tests_boto3/functional/test_s3.py::test_multipart_upload_on_a_bucket_with_policy
s3tests_boto3/functional/test_s3.py::test_multipart_upload_small
-s3tests_boto3/functional/test_s3.py::test_post_object_authenticated_request_bad_access_key
s3tests_boto3/functional/test_s3.py::test_bucket_create_exists
s3tests_boto3/functional/test_s3.py::test_bucket_create_exists_nonowner
s3tests_boto3/functional/test_s3.py::test_bucket_recreate_overwrite_acl
s3tests_boto3/functional/test_s3.py::test_bucket_recreate_new_acl
s3tests_boto3/functional/test_s3.py::test_list_multipart_upload_owner
s3tests_boto3/functional/test_s3.py::test_lifecycle_set
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_versioning_enabled
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_tags2
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_versioned_tags2
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_noncur_tags1
-s3tests_boto3/functional/test_s3.py::test_lifecycle_set_date
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_put
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_head
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_header_tags_head
-s3tests_boto3/functional/test_s3.py::test_lifecycle_transition_set_invalid_date
s3tests_boto3/functional/test_s3.py::test_put_obj_enc_conflict_c_s3
s3tests_boto3/functional/test_s3.py::test_put_obj_enc_conflict_c_kms
s3tests_boto3/functional/test_s3.py::test_put_obj_enc_conflict_s3_kms
s3tests_boto3/functional/test_s3.py::test_bucket_policy_put_obj_s3_kms
s3tests_boto3/functional/test_s3.py::test_bucket_policy_put_obj_kms_noenc
s3tests_boto3/functional/test_s3.py::test_bucket_policy_put_obj_kms_s3
-s3tests_boto3/functional/test_s3.py::test_object_lock_multi_delete_object_with_retention
-s3tests_boto3/functional/test_s3.py::test_object_lock_put_legal_hold
-s3tests_boto3/functional/test_s3.py::test_object_lock_get_legal_hold
-s3tests_boto3/functional/test_s3.py::test_object_lock_changing_mode_from_governance_with_bypass
-s3tests_boto3/functional/test_s3.py::test_object_lock_changing_mode_from_governance_without_bypass
-s3tests_boto3/functional/test_s3.py::test_object_lock_changing_mode_from_compliance
s3tests_boto3/functional/test_s3.py::test_object_read_unreadable
s3tests_boto3/functional/test_s3.py::test_user_policy
s3tests_boto3/functional/test_s3.py::test_get_public_acl_bucket_policy_status
@@ -63,15 +37,12 @@ s3tests_boto3/functional/test_s3.py::test_block_public_put_bucket_acls
s3tests_boto3/functional/test_s3.py::test_block_public_object_canned_acls
s3tests_boto3/functional/test_s3.py::test_block_public_policy
s3tests_boto3/functional/test_s3.py::test_ignore_public_acls
-s3tests_boto3/functional/test_s3.py::test_sse_s3_default_post_object_authenticated_request
-s3tests_boto3/functional/test_s3.py::test_sse_kms_default_post_object_authenticated_request
s3tests_boto3/functional/test_s3select.py::test_generate_projection
s3tests_boto3/functional/test_s3select.py::test_alias_cyclic_refernce
s3tests_boto3/functional/test_s3select.py::test_schema_definition
s3tests_boto3/functional/test_s3select.py::test_progress_expressions
s3tests_boto3/functional/test_s3.py::test_object_write_with_chunked_transfer_encoding
s3tests_boto3/functional/test_s3.py::test_versioning_concurrent_multi_object_delete
-s3tests_boto3/functional/test_s3.py::test_post_object_upload_size_rgw_chunk_size_bug
s3tests_boto3/functional/test_s3.py::test_get_object_torrent
s3tests_boto3/functional/test_s3select.py::test_count_json_operation
s3tests_boto3/functional/test_s3select.py::test_column_sum_min_max
@@ -153,15 +124,9 @@ s3tests_boto3/functional/test_s3.py::test_put_object_ifmatch_failed
s3tests_boto3/functional/test_s3.py::test_put_object_ifnonmatch_failed
s3tests_boto3/functional/test_s3.py::test_put_object_ifnonmatch_overwrite_existed_failed
s3tests_boto3/functional/test_s3.py::test_object_raw_authenticated_bucket_gone
-s3tests_boto3/functional/test_s3.py::test_object_copy_to_itself_with_metadata
s3tests_boto3/functional/test_s3.py::test_object_copy_canned_acl
-s3tests_boto3/functional/test_s3.py::test_object_copy_retaining_metadata
-s3tests_boto3/functional/test_s3.py::test_object_copy_replacing_metadata
-s3tests_boto3/functional/test_s3.py::test_object_copy_versioning_multipart_upload
-s3tests_boto3/functional/test_s3.py::test_list_multipart_upload
s3tests_boto3/functional/test_s3.py::test_multipart_upload_missing_part
s3tests_boto3/functional/test_s3.py::test_multipart_upload_incorrect_etag
-s3tests_boto3/functional/test_s3.py::test_set_bucket_tagging
s3tests_boto3/functional/test_s3.py::test_atomic_dual_conditional_write_1mb
s3tests_boto3/functional/test_s3.py::test_versioned_concurrent_object_create_concurrent_remove
s3tests_boto3/functional/test_s3.py::test_encrypted_transfer_1b
@@ -171,13 +136,7 @@ s3tests_boto3/functional/test_s3.py::test_encrypted_transfer_13b
s3tests_boto3/functional/test_s3.py::test_encryption_sse_c_method_head
s3tests_boto3/functional/test_s3.py::test_encryption_sse_c_present
s3tests_boto3/functional/test_s3.py::test_encryption_sse_c_other_key
-s3tests_boto3/functional/test_s3.py::test_sse_kms_method_head
s3tests_boto3/functional/test_s3.py::test_bucket_policy
-s3tests_boto3/functional/test_s3.py::test_bucketv2_policy
-s3tests_boto3/functional/test_s3.py::test_bucket_policy_another_bucket
-s3tests_boto3/functional/test_s3.py::test_bucketv2_policy_another_bucket
-s3tests_boto3/functional/test_s3.py::test_get_obj_tagging
-s3tests_boto3/functional/test_s3.py::test_put_max_tags
s3tests_boto3/functional/test_s3.py::test_bucket_policy_put_obj_s3_noenc
s3tests_boto3/functional/test_s3.py::test_copy_object_ifmatch_failed
s3tests_boto3/functional/test_s3.py::test_copy_object_ifnonematch_good
@@ -185,27 +144,10 @@ s3tests_boto3/functional/test_s3.py::test_get_bucket_encryption_s3
s3tests_boto3/functional/test_s3.py::test_get_bucket_encryption_kms
s3tests_boto3/functional/test_s3.py::test_delete_bucket_encryption_s3
s3tests_boto3/functional/test_s3.py::test_delete_bucket_encryption_kms
-s3tests_boto3/functional/test_s3.py::test_sse_s3_default_upload_1b
-s3tests_boto3/functional/test_s3.py::test_sse_s3_default_upload_1kb
-s3tests_boto3/functional/test_s3.py::test_sse_s3_default_upload_1mb
-s3tests_boto3/functional/test_s3.py::test_sse_s3_default_upload_8mb
-s3tests_boto3/functional/test_s3.py::test_sse_kms_default_upload_1b
-s3tests_boto3/functional/test_s3.py::test_sse_kms_default_upload_1kb
-s3tests_boto3/functional/test_s3.py::test_sse_kms_default_upload_1mb
-s3tests_boto3/functional/test_s3.py::test_sse_kms_default_upload_8mb
-s3tests_boto3/functional/test_s3.py::test_sse_s3_default_method_head
-s3tests_boto3/functional/test_s3.py::test_sse_s3_default_multipart_upload
-s3tests_boto3/functional/test_s3.py::test_sse_s3_encrypted_upload_1b
-s3tests_boto3/functional/test_s3.py::test_sse_s3_encrypted_upload_1kb
-s3tests_boto3/functional/test_s3.py::test_sse_s3_encrypted_upload_1mb
-s3tests_boto3/functional/test_s3.py::test_sse_s3_encrypted_upload_8mb
s3tests/functional/test_s3.py::test_atomic_write_bucket_gone
-s3tests_boto3/functional/test_s3.py::test_bucket_create_delete
s3tests_boto3/functional/test_s3.py::test_atomic_write_bucket_gone
s3tests_boto3/functional/test_s3.py::test_object_raw_get_x_amz_expires_not_expired
s3tests_boto3/functional/test_s3.py::test_object_raw_get_x_amz_expires_not_expired_tenant
-s3tests_boto3/functional/test_s3.py::test_cors_presigned_get_object
-s3tests_boto3/functional/test_s3.py::test_cors_presigned_get_object_tenant
s3tests_boto3/functional/test_s3.py::test_encryption_sse_c_unaligned_multipart_upload
s3tests_boto3/functional/test_s3.py::test_multipart_get_part
s3tests_boto3/functional/test_s3.py::test_non_multipart_get_part
@@ -214,16 +156,6 @@ s3tests/functional/test_headers.py::test_object_create_bad_date_none_aws2
s3tests/functional/test_headers.py::test_bucket_create_bad_authorization_invalid_aws2
s3tests/functional/test_headers.py::test_bucket_create_bad_date_none_aws2
s3tests_boto3/functional/test_s3.py::test_versioned_concurrent_object_create_and_remove
-s3tests_boto3/functional/test_s3.py::test_post_object_wrong_bucket
-s3tests_boto3/functional/test_s3.py::test_cors_presigned_put_object
-s3tests_boto3/functional/test_s3.py::test_cors_presigned_put_object_with_acl
-s3tests_boto3/functional/test_s3.py::test_cors_presigned_put_object_tenant
-s3tests_boto3/functional/test_s3.py::test_cors_presigned_put_object_tenant_with_acl
s3tests_boto3/functional/test_s3.py::test_object_presigned_put_object_with_acl_tenant
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_newer_noncurrent
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_size_gt
-s3tests_boto3/functional/test_s3.py::test_lifecycle_expiration_size_lt
-s3tests_boto3/functional/test_s3.py::test_object_lock_delete_multipart_object_with_retention
-s3tests_boto3/functional/test_s3.py::test_object_lock_delete_multipart_object_with_legal_hold_on
s3tests_boto3/functional/test_s3.py::test_get_undefined_public_block
s3tests_boto3/functional/test_s3.py::test_get_public_block_deny_bucket_policy
\ No newline at end of file
diff --git a/src/test/unit_tests/jest_tests/test_versioning_concurrency.test.js b/src/test/unit_tests/jest_tests/test_versioning_concurrency.test.js
index faa98e4c8b..022ab00ea9 100644
--- a/src/test/unit_tests/jest_tests/test_versioning_concurrency.test.js
+++ b/src/test/unit_tests/jest_tests/test_versioning_concurrency.test.js
@@ -59,4 +59,26 @@ describe('test versioning concurrency', () => {
const versions = await nsfs.list_object_versions({ bucket: bucket }, DUMMY_OBJECT_SDK);
expect(versions.objects.length).toBe(5);
});
+
+ it('multiple delete version id and key', async () => {
+ const bucket = 'bucket1';
+ const key = 'key2';
+ const versions_arr = [];
+ // upload 5 versions of key2
+ for (let i = 0; i < 5; i++) {
+ const random_data = Buffer.from(String(i));
+ const body = buffer_utils.buffer_to_read_stream(random_data);
+ const res = await nsfs.upload_object({ bucket: bucket, key: key, source_stream: body }, DUMMY_OBJECT_SDK).catch(err => console.log('put error - ', err));
+ versions_arr.push(res.etag);
+ }
+ const mid_version_id = versions_arr[3];
+ const number_of_successful_operations = [];
+ for (let i = 0; i < 15; i++) {
+ nsfs.delete_object({ bucket: bucket, key: key, version_id: mid_version_id }, DUMMY_OBJECT_SDK)
+ .then(res => number_of_successful_operations.push(res))
+ .catch(err => console.log('delete the same key & version id error - ', err));
+ }
+ await P.delay(1000);
+ expect(number_of_successful_operations.length).toBe(15);
+ });
});
diff --git a/src/test/unit_tests/test_bucketspace_versioning.js b/src/test/unit_tests/test_bucketspace_versioning.js
index d42248b195..988d9ad52a 100644
--- a/src/test/unit_tests/test_bucketspace_versioning.js
+++ b/src/test/unit_tests/test_bucketspace_versioning.js
@@ -21,7 +21,6 @@ coretest.setup({});
const XATTR_INTERNAL_NOOBAA_PREFIX = 'user.noobaa.';
const XATTR_VERSION_ID = XATTR_INTERNAL_NOOBAA_PREFIX + 'version_id';
-const XATTR_PREV_VERSION_ID = XATTR_INTERNAL_NOOBAA_PREFIX + 'prev_version_id';
const XATTR_DELETE_MARKER = XATTR_INTERNAL_NOOBAA_PREFIX + 'delete_marker';
const NULL_VERSION_ID = 'null';
@@ -916,7 +915,6 @@ mocha.describe('bucketspace namespace_fs - versioning', function() {
assert.ok(is_dm);
const version_path = path.join(suspended_full_path, '.versions', key_to_delete3 + '_' + latest_dm_version);
const version_info = await stat_and_get_all(version_path, '');
- assert.equal(version_info.xattr[XATTR_PREV_VERSION_ID], prev_dm.VersionId);
assert.equal(version_info.xattr[XATTR_VERSION_ID], NULL_VERSION_ID);
});
});
@@ -975,8 +973,6 @@ mocha.describe('bucketspace namespace_fs - versioning', function() {
const max_version0 = await find_max_version_past(full_delete_path, key1, '');
const cur_version_id1 = await stat_and_get_version_id(full_delete_path, key1);
assert.equal(upload_res_arr[2].VersionId, cur_version_id1);
- const cur_ver_info = await stat_and_get_all(full_delete_path, key1);
- assert.equal(cur_ver_info.xattr[XATTR_PREV_VERSION_ID], max_version0);
const is_dm = await is_delete_marker(full_delete_path, '', key1, max_version0);
assert.ok(is_dm);
@@ -1292,8 +1288,6 @@ mocha.describe('bucketspace namespace_fs - versioning', function() {
const cur_version_id1 = await stat_and_get_version_id(full_delete_path, key1);
assert.equal(upload_res_arr[2].VersionId, cur_version_id1);
- const cur_ver_info = await stat_and_get_all(full_delete_path, key1);
- assert.equal(cur_ver_info.xattr[XATTR_PREV_VERSION_ID], max_version0);
const is_dm = await is_delete_marker(full_delete_path, '', key1, max_version0);
assert.ok(is_dm);
@@ -2663,6 +2657,33 @@ mocha.describe('bucketspace namespace_fs - versioning', function() {
assert.fail(`Failed with an error: ${err.Code}`);
}
});
+
+ mocha.it('head object, with version enabled, version id specified delete marker - should throw error with code 405', async function() {
+ try {
+ await s3_client.headObject({Bucket: bucket_name, Key: en_version_key, VersionId: versionID_1});
+ assert.fail('Should fail');
+ } catch (err) {
+ assert.strictEqual(err.$metadata.httpStatusCode, 405);
+ // In headObject the AWS SDK doesn't return the err.Code
+ // In AWS CLI it looks:
+ // An error occurred (405) when calling the HeadObject operation: Method Not Allowed
+ // in the docs: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html
+ // if the HEAD request generates an error, it returns a generic code, such as ...
+ // 405 Method Not Allowed, ... It's not possible to retrieve the exact exception of these error codes.
+ }
+ });
+
+ mocha.it('get object, with version enabled, version id specified delete marker - should throw error with code 405', async function() {
+ try {
+ await s3_client.getObject({Bucket: bucket_name, Key: en_version_key, VersionId: versionID_1});
+ assert.fail('Should fail');
+ } catch (err) {
+ assert.strictEqual(err.$metadata.httpStatusCode, 405);
+ assert.strictEqual(err.Code, 'MethodNotAllowed');
+ // In AWS CLI it looks:
+ // An error occurred (MethodNotAllowed) when calling the GetObject operation: The specified method is not allowed against this resource.
+ }
+ });
});
});
@@ -2804,13 +2825,11 @@ async function compare_version_ids(full_path, key, put_result_version_id, prev_v
}
assert.equal(new_version_id, xattr_version_id);
if (prev_version_id) {
- const xattr_prev_version_id = get_version_id_by_xattr(stat, true);
if (is_enabled) {
// When versioning is Enabled the version IDs are unique.
// Hence, the new version ID must be different than the previous one.
assert.notEqual(new_version_id, prev_version_id);
}
- assert.equal(xattr_prev_version_id, prev_version_id);
}
return true;
}
@@ -2819,8 +2838,7 @@ function get_version_id_by_stat(stat) {
return 'mtime-' + stat.mtimeNsBigint.toString(36) + '-ino-' + stat.ino.toString(36);
}
-function get_version_id_by_xattr(stat, prev) {
- if (prev) return stat && stat.xattr[XATTR_PREV_VERSION_ID];
+function get_version_id_by_xattr(stat) {
return (stat && stat.xattr[XATTR_VERSION_ID]) || 'null';
}
diff --git a/src/test/unit_tests/test_namespace_fs.js b/src/test/unit_tests/test_namespace_fs.js
index ecabff5009..d334586f6b 100644
--- a/src/test/unit_tests/test_namespace_fs.js
+++ b/src/test/unit_tests/test_namespace_fs.js
@@ -1582,12 +1582,14 @@ mocha.describe('namespace_fs copy object', function() {
assert.deepStrictEqual(xattr, { ...add_user_prefix(read_md_res.xattr), [XATTR_DIR_CONTENT]: `${read_md_res.size}` });
assert.equal(stream_content_type, read_md_res.content_type);
+ const copy_source = { bucket: upload_bkt, key: key1 };
await ns_tmp.upload_object({
bucket: upload_bkt,
key: key2,
- copy_source: { bucket: upload_bkt, key: key1 },
+ copy_source: copy_source,
size: 100,
- xattr_copy: true
+ xattr_copy: true,
+ xattr: await _get_source_copy_xattr(copy_source, ns_tmp, dummy_object_sdk)
}, dummy_object_sdk);
const file_path2 = ns_tmp_bucket_path + '/' + key2;
xattr = await get_xattr(file_path2);
@@ -1622,12 +1624,14 @@ mocha.describe('namespace_fs copy object', function() {
assert.deepStrictEqual(xattr, { ...add_user_prefix(read_md_res.xattr) });
assert.equal(stream_content_type, read_md_res.content_type);
+ const copy_source = { bucket: upload_bkt, key: src_key };
await ns_tmp.upload_object({
bucket: upload_bkt,
key: dst_key,
- copy_source: { bucket: upload_bkt, key: src_key },
+ copy_source: copy_source,
size: 100,
xattr_copy: true,
+ xattr: await _get_source_copy_xattr(copy_source, ns_tmp, dummy_object_sdk)
}, dummy_object_sdk);
const file_path2 = ns_tmp_bucket_path + '/' + dst_key;
xattr = await get_xattr(file_path2);
@@ -1663,12 +1667,14 @@ mocha.describe('namespace_fs copy object', function() {
assert.deepStrictEqual(xattr, { ...add_user_prefix(read_md_res.xattr) });
assert.equal(stream_content_type, read_md_res.content_type);
+ const copy_source = { bucket: upload_bkt, key: src_key };
await ns_tmp.upload_object({
bucket: upload_bkt,
key: dst_key,
- copy_source: { bucket: upload_bkt, key: src_key },
+ copy_source: copy_source,
size: 0,
- xattr_copy: true
+ xattr_copy: true,
+ xattr: await _get_source_copy_xattr(copy_source, ns_tmp, dummy_object_sdk)
}, dummy_object_sdk);
const file_path2 = ns_tmp_bucket_path + '/' + dst_key;
xattr = await get_xattr(file_path2);
@@ -1694,6 +1700,16 @@ mocha.describe('namespace_fs copy object', function() {
});
+// simulates object_sdk.fix_copy_source_params filtering of source xattr for copy object tests
+async function _get_source_copy_xattr(copy_source, source_ns, object_sdk) {
+ const read_md_res = await source_ns.read_object_md({
+ bucket: copy_source.bucket,
+ key: copy_source.key
+ }, object_sdk);
+ const res = _.omitBy(read_md_res.xattr, (val, name) => name.startsWith?.('noobaa-namespace'));
+ return res;
+}
+
async function list_objects(ns, bucket, delimiter, prefix, dummy_object_sdk) {
const res = await ns.list_objects({
bucket: bucket,
diff --git a/src/test/unit_tests/test_nsfs_versioning_gpfs.js b/src/test/unit_tests/test_nsfs_versioning_gpfs.js
index 34f84645d3..ee9153fe17 100644
--- a/src/test/unit_tests/test_nsfs_versioning_gpfs.js
+++ b/src/test/unit_tests/test_nsfs_versioning_gpfs.js
@@ -196,6 +196,28 @@ mocha.describe('namespace_fs gpfs- versioning', async function() {
assert.equal(head_res.version_id, latest_version_id);
});
+ mocha.it('delete object with version id - versioning enabled', async function() {
+ // 1. put bucket versioning enabled
+ await ns_obj.set_bucket_versioning('ENABLED', dummy_object_sdk);
+ // 2. create multiple versions (2)
+ const key2 = 'my-key-to-delete.txt';
+ await put_object(dummy_object_sdk, ns_obj, gpfs_bucket, key2);
+ const put_res2 = await put_object(dummy_object_sdk, ns_obj, gpfs_bucket, key2);
+ // 3. delete object by version-id
+ const delete_res = await delete_object(dummy_object_sdk, ns_obj, gpfs_bucket, key, put_res2.version_id);
+ assert.equal(delete_res.version_id, put_res2.version_id);
+ });
+
+ mocha.it('delete objects - versioning enabled - use delete_multiple_objects to delete a single non-existing key', async function() {
+ // 1. put bucket versioning enabled
+ await ns_obj.set_bucket_versioning('ENABLED', dummy_object_sdk);
+ // 2. delete objects (a single non existing key)
+ const objects = [{ key: 'non-existing-key', version_id: undefined }];
+ const delete_objects_res = await delete_multiple_objects(dummy_object_sdk, ns_obj, gpfs_bucket, objects);
+ assert.equal(delete_objects_res.created_delete_marker, true);
+ assert.ok(delete_objects_res.created_version_id !== undefined);
+ });
+
});
async function put_object(dummy_object_sdk, ns, bucket, key) {
@@ -229,6 +251,15 @@ async function delete_object(dummy_object_sdk, ns, bucket, key, version_id) {
return delete_res;
}
+async function delete_multiple_objects(dummy_object_sdk, ns, bucket, objects) {
+ const delete_objects_res = await ns.delete_multiple_objects({
+ bucket,
+ objects
+ }, dummy_object_sdk);
+ console.log('delete_multiple_objects response', util.inspect(delete_objects_res));
+ return delete_objects_res;
+}
+
async function stat_and_get_all(full_path, key) {
const key_path = path.join(full_path, key);
diff --git a/src/util/native_fs_utils.js b/src/util/native_fs_utils.js
index f56fd288fe..002b6a3d61 100644
--- a/src/util/native_fs_utils.js
+++ b/src/util/native_fs_utils.js
@@ -229,6 +229,23 @@ async function safe_unlink_posix(fs_context, to_delete_path, to_delete_version_i
}
}
+/**
+ * unlink_ignore_enoent unlinks a file and if it received an ENOENT error it'll not fail
+ * @param {nb.NativeFSContext} fs_context
+ * @param {String} to_delete_path
+ * @returns {Promise}
+ */
+async function unlink_ignore_enoent(fs_context, to_delete_path) {
+ dbg.log1('native_fs_utils.unlink_ignore_enoent:', to_delete_path);
+ try {
+ await nb_native().fs.unlink(fs_context, to_delete_path);
+ } catch (err) {
+ dbg.warn(`native_fs_utils.unlink_ignore_enoent unlink error: file path ${to_delete_path} error`, err);
+ if (err.code !== 'ENOENT') throw err;
+ dbg.warn(`native_fs_utils.unlink_ignore_enoent unlink: file ${to_delete_path} already deleted, ignoring..`);
+ }
+}
+
// safe_link_gpfs links source_path to dest_path while verifing dest.fd
async function safe_link_gpfs(fs_context, dst_path, src_file, dst_file) {
dbg.log1('Namespace_fs.safe_link_gpfs source_file:', src_file, src_file.fd, dst_file, dst_file && dst_file.fd);
@@ -655,6 +672,7 @@ exports.validate_bucket_creation = validate_bucket_creation;
exports.is_path_exists = is_path_exists;
exports.is_dir_rw_accessible = is_dir_rw_accessible;
exports.folder_delete = folder_delete;
+exports.unlink_ignore_enoent = unlink_ignore_enoent;
exports.get_bucket_tmpdir_full_path = get_bucket_tmpdir_full_path;
exports.get_bucket_tmpdir_name = get_bucket_tmpdir_name;
exports.entity_enum = entity_enum;