diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index 204c1a564fe7..949227bd373f 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -67,6 +67,7 @@
 import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NOT_IMPLEMENTED;
+import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.ENCODING_TYPE;
 
 /**
@@ -148,7 +149,7 @@ public Response get(
       }
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, bucketName);
+        throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
       } else {
         throw ex;
       }
@@ -242,8 +243,7 @@ public Response put(@PathParam("bucket") String bucketName,
           .build();
     } catch (OMException exception) {
       if (exception.getResult() == ResultCodes.INVALID_BUCKET_NAME) {
-        throw S3ErrorTable.newError(S3ErrorTable.INVALID_BUCKET_NAME,
-            bucketName);
+        throw newError(S3ErrorTable.INVALID_BUCKET_NAME, bucketName, exception);
       }
       LOG.error("Error in Create Bucket Request for bucket: {}", bucketName,
           exception);
@@ -263,8 +263,7 @@ public Response listMultipartUploads(
       ozoneMultipartUploadList = bucket.listMultipartUploads(prefix);
     } catch (OMException exception) {
       if (exception.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
-            prefix);
+        throw newError(S3ErrorTable.ACCESS_DENIED, prefix, exception);
       }
       throw exception;
     }
@@ -309,13 +308,11 @@ public Response delete(@PathParam("bucket") String bucketName)
       deleteS3Bucket(bucketName);
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.BUCKET_NOT_EMPTY) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .BUCKET_NOT_EMPTY, bucketName);
+        throw newError(S3ErrorTable.BUCKET_NOT_EMPTY, bucketName, ex);
       } else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .NO_SUCH_BUCKET, bucketName);
+        throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, bucketName);
+        throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
       } else {
         throw ex;
       }
@@ -402,14 +399,12 @@ public S3BucketAcl getAcl(String bucketName)
       return result;
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .NO_SUCH_BUCKET, bucketName);
+        throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .ACCESS_DENIED, bucketName);
+        throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
       } else {
         LOG.error("Failed to get acl of Bucket " + bucketName, ex);
-        throw S3ErrorTable.newError(S3ErrorTable.INTERNAL_ERROR, bucketName);
+        throw newError(S3ErrorTable.INTERNAL_ERROR, bucketName, ex);
       }
     }
   }
@@ -503,11 +498,9 @@ public Response putAcl(String bucketName, HttpHeaders httpHeaders,
       }
     } catch (OMException exception) {
       if (exception.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET,
-            bucketName);
+        throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, exception);
       } else if (exception.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .ACCESS_DENIED, bucketName);
+        throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, exception);
       }
       LOG.error("Error in set ACL Request for bucket: {}", bucketName,
           exception);
@@ -531,13 +524,13 @@ private List<OzoneAcl> getAndConvertAclOnBucket(String value,
     for (String acl: subValues) {
       String[] part = acl.split("=");
       if (part.length != 2) {
-        throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, acl);
+        throw newError(S3ErrorTable.INVALID_ARGUMENT, acl);
       }
       S3Acl.ACLIdentityType type =
           S3Acl.ACLIdentityType.getTypeFromHeaderType(part[0]);
       if (type == null || !type.isSupported()) {
         LOG.warn("S3 grantee {} is null or not supported", part[0]);
-        throw S3ErrorTable.newError(NOT_IMPLEMENTED, part[0]);
+        throw newError(NOT_IMPLEMENTED, part[0]);
       }
       // Build ACL on Bucket
       BitSet aclsOnBucket =
@@ -564,13 +557,13 @@ private List<OzoneAcl> getAndConvertAclOnVolume(String value,
     for (String acl: subValues) {
       String[] part = acl.split("=");
       if (part.length != 2) {
-        throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, acl);
+        throw newError(S3ErrorTable.INVALID_ARGUMENT, acl);
       }
       S3Acl.ACLIdentityType type =
           S3Acl.ACLIdentityType.getTypeFromHeaderType(part[0]);
       if (type == null || !type.isSupported()) {
         LOG.warn("S3 grantee {} is null or not supported", part[0]);
-        throw S3ErrorTable.newError(NOT_IMPLEMENTED, part[0]);
+        throw newError(NOT_IMPLEMENTED, part[0]);
       }
       // Build ACL on Volume
       BitSet aclsOnVolume =
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
index 8014cefe58a3..ffd95cb6be18 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
@@ -38,6 +38,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError;
+
 /**
  * Basic helpers for all the REST endpoints.
  */
@@ -57,7 +59,7 @@ protected OzoneBucket getBucket(OzoneVolume volume, String bucketName)
       bucket = volume.getBucket(bucketName);
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName);
+        throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
       } else {
         throw ex;
       }
@@ -88,9 +90,9 @@ protected OzoneBucket getBucket(String bucketName)
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND
           || ex.getResult() == ResultCodes.VOLUME_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName);
+        throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, bucketName);
+        throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
       } else {
         throw ex;
       }
@@ -117,7 +119,7 @@ protected String createS3Bucket(String bucketName) throws
       client.getObjectStore().createS3Bucket(bucketName);
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, bucketName);
+        throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
       } else if (ex.getResult() != ResultCodes.BUCKET_ALREADY_EXISTS) {
         // S3 does not return error for bucket already exists, it just
         // returns the location.
@@ -138,8 +140,7 @@ public void deleteS3Bucket(String s3BucketName)
       client.getObjectStore().deleteS3Bucket(s3BucketName);
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
-            s3BucketName);
+        throw newError(S3ErrorTable.ACCESS_DENIED, s3BucketName, ex);
       }
       throw ex;
     }
@@ -182,8 +183,7 @@ private Iterator<? extends OzoneBucket> iterateBuckets(
       if (e.getResult() == ResultCodes.VOLUME_NOT_FOUND) {
         return Collections.emptyIterator();
       } else if (e.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
-            "listBuckets");
+        throw newError(S3ErrorTable.ACCESS_DENIED, "listBuckets", e);
       } else {
         throw e;
       }
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index d6f46e087e1f..863b1b08cd7e 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -95,6 +95,7 @@
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST;
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_UPLOAD;
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.PRECOND_FAILED;
+import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCEPT_RANGE_HEADER;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.CONTENT_RANGE_HEADER;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER;
@@ -210,8 +211,7 @@ public Response put(
           .build();
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.NOT_A_FILE) {
-        OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST,
-            keyPath);
+        OS3Exception os3Exception = newError(INVALID_REQUEST, keyPath, ex);
         os3Exception.setErrorMessage("An error occurred (InvalidRequest) " +
             "when calling the PutObject/MPU PartUpload operation: " +
             OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are" +
@@ -219,7 +219,7 @@ public Response put(
             "which caused put operation to fail.");
         throw os3Exception;
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyPath);
+        throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
       }
       LOG.error("Exception occurred in PutObject", ex);
       throw ex;
@@ -274,8 +274,7 @@ public Response get(
           length);
       LOG.debug("range Header provided: {}", rangeHeader);
       if (rangeHeader.isInValidRange()) {
-        throw S3ErrorTable.newError(
-            S3ErrorTable.INVALID_RANGE, rangeHeaderVal);
+        throw newError(S3ErrorTable.INVALID_RANGE, rangeHeaderVal);
       }
     }
     ResponseBuilder responseBuilder;
@@ -326,10 +325,9 @@ public Response get(
       return responseBuilder.build();
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .NO_SUCH_KEY, keyPath);
+        throw newError(S3ErrorTable.NO_SUCH_KEY, keyPath, ex);
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyPath);
+        throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
       } else {
         throw ex;
       }
@@ -368,7 +366,7 @@ public Response head(
         // Just return 404 with no content
         return Response.status(Status.NOT_FOUND).build();
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyPath);
+        throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
       } else {
         throw ex;
       }
@@ -398,7 +396,7 @@ private Response abortMultipartUpload(String bucket, String key, String
       ozoneBucket.abortMultipartUpload(key, uploadId);
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_UPLOAD, uploadId);
+        throw newError(S3ErrorTable.NO_SUCH_UPLOAD, uploadId, ex);
       }
       throw ex;
     }
@@ -433,8 +431,7 @@ public Response delete(
       bucket.deleteKey(keyPath);
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable
-            .NO_SUCH_BUCKET, bucketName);
+        throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
       } else if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
         //NOT_FOUND is not a problem, AWS doesn't throw exception for missing
         // keys. Just return 204
@@ -444,7 +441,7 @@
         // NOT_FOUND is not a problem, AWS doesn't throw exception for missing
         // keys. Just return 204
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyPath);
+        throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
       } else {
         throw ex;
       }
@@ -495,7 +492,7 @@ public Response initializeMultipartUpload(
           multipartUploadInitiateResponse).build();
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, key);
+        throw newError(S3ErrorTable.ACCESS_DENIED, key, ex);
       }
       LOG.error("Error in Initiate Multipart Upload Request for bucket: {}, " +
           "key: {}", bucket, key, ex);
@@ -542,21 +539,21 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket,
           .build();
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.INVALID_PART) {
-        throw S3ErrorTable.newError(S3ErrorTable.INVALID_PART, key);
+        throw newError(S3ErrorTable.INVALID_PART, key, ex);
       } else if (ex.getResult() == ResultCodes.INVALID_PART_ORDER) {
-        throw S3ErrorTable.newError(S3ErrorTable.INVALID_PART_ORDER, key);
+        throw newError(S3ErrorTable.INVALID_PART_ORDER, key, ex);
       } else if (ex.getResult() ==
           ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
-        throw S3ErrorTable.newError(NO_SUCH_UPLOAD, uploadID);
+        throw newError(NO_SUCH_UPLOAD, uploadID, ex);
       } else if (ex.getResult() == ResultCodes.ENTITY_TOO_SMALL) {
-        throw S3ErrorTable.newError(ENTITY_TOO_SMALL, key);
+        throw newError(ENTITY_TOO_SMALL, key, ex);
       } else if (ex.getResult() == ResultCodes.INVALID_REQUEST) {
-        OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST, key);
+        OS3Exception os3Exception = newError(INVALID_REQUEST, key, ex);
         os3Exception.setErrorMessage("An error occurred (InvalidRequest) " +
             "when calling the CompleteMultipartUpload operation: You must " +
             "specify at least one part");
         throw os3Exception;
       } else if (ex.getResult() == ResultCodes.NOT_A_FILE) {
-        OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST, key);
+        OS3Exception os3Exception = newError(INVALID_REQUEST, key, ex);
         os3Exception.setErrorMessage("An error occurred (InvalidRequest) " +
             "when calling the CompleteMultipartUpload operation: " +
             OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are " +
@@ -602,8 +599,7 @@ private Response createMultipartKey(String bucket, String key, long length,
           headers.getHeaderString(COPY_SOURCE_IF_UNMODIFIED_SINCE);
       if (!checkCopySourceModificationTime(sourceKeyModificationTime,
           copySourceIfModifiedSince, copySourceIfUnmodifiedSince)) {
-        throw S3ErrorTable.newError(PRECOND_FAILED,
-            sourceBucket + "/" + sourceKey);
+        throw newError(PRECOND_FAILED, sourceBucket + "/" + sourceKey);
       }
       try (OzoneInputStream sourceObject =
@@ -651,11 +647,9 @@ private Response createMultipartKey(String bucket, String key, long length,
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
-        throw S3ErrorTable.newError(NO_SUCH_UPLOAD,
-            uploadID);
+        throw newError(NO_SUCH_UPLOAD, uploadID, ex);
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
-            bucket + "/" + key);
+        throw newError(S3ErrorTable.ACCESS_DENIED, bucket + "/" + key, ex);
       }
       throw ex;
     }
@@ -710,11 +704,10 @@ private Response listParts(String bucket, String key, String uploadID,
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
-        throw S3ErrorTable.newError(NO_SUCH_UPLOAD,
-            uploadID);
+        throw newError(NO_SUCH_UPLOAD, uploadID, ex);
       } else if (ex.getResult() ==
           ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
-            bucket + "/" + key + "/" + uploadID);
+        throw newError(S3ErrorTable.ACCESS_DENIED,
+            bucket + "/" + key + "/" + uploadID, ex);
       }
       throw ex;
     }
@@ -750,8 +743,7 @@ private CopyObjectResponse copyObject(String copyHeader,
       // options like storage type are provided or not when source and
       // dest are given same
       if (storageTypeDefault) {
-        OS3Exception ex = S3ErrorTable.newError(S3ErrorTable
-            .INVALID_REQUEST, copyHeader);
+        OS3Exception ex = newError(S3ErrorTable.INVALID_REQUEST, copyHeader);
         ex.setErrorMessage("This copy request is illegal because it is " +
             "trying to copy an object to itself without changing " +
             "the object's metadata, storage class, website redirect " +
@@ -797,12 +789,12 @@ private CopyObjectResponse copyObject(String copyHeader,
       return copyObjectResponse;
     } catch (OMException ex) {
       if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_KEY, sourceKey);
+        throw newError(S3ErrorTable.NO_SUCH_KEY, sourceKey, ex);
       } else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
-        throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, sourceBucket);
+        throw newError(S3ErrorTable.NO_SUCH_BUCKET, sourceBucket, ex);
       } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
-        throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED,
-            destBucket + "/" + destkey);
+        throw newError(S3ErrorTable.ACCESS_DENIED,
+            destBucket + "/" + destkey, ex);
       }
       throw ex;
     } finally {
@@ -829,7 +821,7 @@ public static Pair<String, String> parseSourceHeader(String copyHeader)
     }
     int pos = header.indexOf('/');
     if (pos == -1) {
-      OS3Exception ex = S3ErrorTable.newError(INVALID_ARGUMENT, header);
+      OS3Exception ex = newError(INVALID_ARGUMENT, header);
       ex.setErrorMessage("Copy Source must mention the source bucket and " +
           "key: sourcebucket/sourcekey");
       throw ex;
@@ -840,7 +832,7 @@ public static Pair<String, String> parseSourceHeader(String copyHeader)
       String key = urlDecode(header.substring(pos + 1));
       return Pair.of(bucket, key);
     } catch (UnsupportedEncodingException e) {
-      OS3Exception ex = S3ErrorTable.newError(INVALID_ARGUMENT, header);
+      OS3Exception ex = newError(INVALID_ARGUMENT, header, e);
       ex.setErrorMessage("Copy Source header could not be url-decoded");
       throw ex;
     }
@@ -851,8 +843,7 @@ private static S3StorageType toS3StorageType(String storageType)
     try {
       return S3StorageType.valueOf(storageType);
     } catch (IllegalArgumentException ex) {
-      throw S3ErrorTable.newError(INVALID_ARGUMENT,
-          storageType);
+      throw newError(INVALID_ARGUMENT, storageType, ex);
     }
   }
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java
index d36e81d4969a..84ec32599cb5 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java
@@ -120,20 +120,26 @@ private S3ErrorTable() {
       "NotImplemented", "This part of feature is not implemented yet.",
       HTTP_NOT_IMPLEMENTED);
 
+  public static OS3Exception newError(OS3Exception e, String resource) {
+    return newError(e, resource, null);
+  }
+
   /**
    * Create a new instance of Error.
    * @param e Error Template
    * @param resource Resource associated with this exception
+   * @param ex the original exception, may be null
    * @return creates a new instance of error based on the template
    */
-  public static OS3Exception newError(OS3Exception e, String resource) {
+  public static OS3Exception newError(OS3Exception e, String resource,
+      Exception ex) {
     OS3Exception err = new OS3Exception(e.getCode(), e.getErrorMessage(),
         e.getHttpCode());
     err.setResource(resource);
     if (e.getHttpCode() == HTTP_INTERNAL_ERROR) {
-      LOG.error("Internal Error: {}", err.toXml(), e);
+      LOG.error("Internal Error: {}", err.toXml(), ex);
     } else if (LOG.isDebugEnabled()) {
-      LOG.debug(err.toXml(), e);
+      LOG.debug(err.toXml(), ex);
     }
     return err;
   }
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java
index ce75c593476a..0a34f1470047 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java
@@ -111,7 +111,7 @@ public static ContinueToken decodeFromString(String key) throws OS3Exception {
     } catch (DecoderException ex) {
       OS3Exception os3Exception = S3ErrorTable.newError(S3ErrorTable
-          .INVALID_ARGUMENT, key);
+          .INVALID_ARGUMENT, key, ex);
       os3Exception.setErrorMessage("The continuation token provided is " +
           "incorrect");
       throw os3Exception;
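
Reviewer note: the core of this change is the new three-argument overload in S3ErrorTable; every call site then threads the caught exception through so the gateway log carries the real stack trace instead of the statically-constructed error template. A condensed sketch of the pattern, restated from the diff above for readability (LOG, HTTP_INTERNAL_ERROR, and the OS3Exception constructor are the real members; the surrounding scaffolding is illustrative only):

  // Two-arg form kept so existing call sites that have no underlying
  // cause still compile; it simply delegates with a null cause.
  public static OS3Exception newError(OS3Exception e, String resource) {
    return newError(e, resource, null);
  }

  public static OS3Exception newError(OS3Exception e, String resource,
      Exception ex) {
    OS3Exception err = new OS3Exception(e.getCode(), e.getErrorMessage(),
        e.getHttpCode());
    err.setResource(resource);
    if (e.getHttpCode() == HTTP_INTERNAL_ERROR) {
      // Before this change the template 'e' itself was logged here, so the
      // stack trace never described the actual failure being reported.
      LOG.error("Internal Error: {}", err.toXml(), ex);
    } else if (LOG.isDebugEnabled()) {
      LOG.debug(err.toXml(), ex);
    }
    return err;
  }

A typical migrated call site, as in BucketEndpoint.get() above, passes the caught OMException as the third argument:

    } catch (OMException ex) {
      if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
        throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
      }
      throw ex;
    }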