@@ -94,11 +94,14 @@ static inline void TF_SetStatusFromAWSError(
9494void ParseS3Path (const Aws::String& fname, bool object_empty_ok,
9595 Aws::String* bucket, Aws::String* object, TF_Status* status) {
9696 size_t scheme_end = fname.find (" ://" ) + 2 ;
97- if (fname.substr (0 , scheme_end + 1 ) != " s3://" ) {
98- TF_SetStatus (status, TF_INVALID_ARGUMENT,
99- " S3 path doesn't start with 's3://'." );
100- return ;
101- }
97+ // NOTE: We disable the check here as we may apply different scheme
98+ // like `s3e://` to help testing (so that it can be switched to `s3://`
99+ // later).
100+ // if (fname.substr(0, scheme_end + 1) != "s3://") {
101+ // TF_SetStatus(status, TF_INVALID_ARGUMENT,
102+ // "S3 path doesn't start with 's3://'.");
103+ // return;
104+ // }
102105
103106 size_t bucket_end = fname.find (" /" , scheme_end + 1 );
104107 if (bucket_end == std::string::npos) {
@@ -507,13 +510,17 @@ uint64_t Length(const TF_ReadOnlyMemoryRegion* region) {
507510
508511// SECTION 4. Implementation for `TF_Filesystem`, the actual filesystem
509512// ----------------------------------------------------------------------------
513+ // TODO: use_multi_part_download=true causes an issue when a read request
514+ // asks for more bytes than the object size. In that case, status code 416
515+ // (Range Not Satisfiable) is returned, but 416 is not handled correctly: s3
516+ // treats it as an error instead of returning OUT_OF_RANGE with fewer bytes.
510517namespace tf_s3_filesystem {
511518S3File::S3File ()
512519 : s3_client(nullptr , ShutdownClient),
513520 executor (nullptr ),
514521 transfer_managers(),
515522 multi_part_chunk_sizes(),
516- use_multi_part_download(true ),
523+ use_multi_part_download(false ), // TODO: change to true after fix
517524 initialization_lock() {
518525 uint64_t temp_value;
519526 multi_part_chunk_sizes[Aws::Transfer::TransferDirection::UPLOAD] =
0 commit comments