4 changes: 4 additions & 0 deletions sdk/storage/azure-storage-file-datalake/CHANGELOG.md
@@ -1,5 +1,9 @@
# Change Log azure-storage-file-datalake

## Version XX.X.X-beta.X (XXXX-XX-XX)
- Added SAS generation methods on clients to improve the discoverability and convenience of SAS generation.
- Mapped StorageErrorException and BlobStorageException to DataLakeStorageException.

## Version 12.0.0-beta.7 (2019-12-04)
This package's
[documentation](https://github.com/Azure/azure-sdk-for-java/blob/master/sdk/storage/azure-storage-file-datalake/README.md)
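The exception-mapping entry in the new changelog section above is the user-facing summary of every code change in this PR: operations that previously let the underlying BlobStorageException escape now surface DataLakeStorageException. A minimal caller-side sketch of the intended effect (not part of the diff; the wrapper method and its null fallback are illustrative only):

```java
import com.azure.storage.file.datalake.DataLakeFileSystemClient;
import com.azure.storage.file.datalake.models.DataLakeStorageException;
import com.azure.storage.file.datalake.models.FileSystemProperties;

final class ExceptionMappingSample {
    // A single catch clause now covers service failures from the Data Lake
    // clients; no blob-layer exception type leaks through.
    static FileSystemProperties propertiesOrNull(DataLakeFileSystemClient fileSystemClient) {
        try {
            return fileSystemClient.getProperties();
        } catch (DataLakeStorageException ex) {
            System.err.printf("Service error %d: %s%n",
                ex.getResponse().getStatusCode(), ex.getMessage());
            return null;
        }
    }
}
```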
@@ -15,6 +15,7 @@
import com.azure.storage.file.datalake.implementation.models.LeaseAccessConditions;
import com.azure.storage.file.datalake.implementation.models.ModifiedAccessConditions;
import com.azure.storage.file.datalake.implementation.models.PathResourceType;
import com.azure.storage.file.datalake.implementation.util.DataLakeImplUtils;
import com.azure.storage.file.datalake.models.DataLakeRequestConditions;
import com.azure.storage.file.datalake.models.DownloadRetryOptions;
import com.azure.storage.file.datalake.models.FileRange;
@@ -328,7 +329,8 @@ public Mono<FileReadAsyncResponse> readWithResponse(FileRange range, DownloadRet
try {
return blockBlobAsyncClient.downloadWithResponse(Transforms.toBlobRange(range),
Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions),
getRangeContentMd5).map(Transforms::toFileReadAsyncResponse);
getRangeContentMd5).map(Transforms::toFileReadAsyncResponse)
.onErrorMap(DataLakeImplUtils::transformBlobStorageException);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
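Every hunk in this file (and in the files below) funnels failures through DataLakeImplUtils.transformBlobStorageException, whose implementation is not part of this diff. A rough sketch of what such a transform plausibly does, included only to make the pattern concrete — the DataLakeStorageException constructor shape is an assumption modeled on BlobStorageException, and the shipped helper may differ:

```java
import com.azure.storage.blob.models.BlobStorageException;
import com.azure.storage.file.datalake.models.DataLakeStorageException;

final class TransformSketch {
    // Illustrative only; not the code in
    // com.azure.storage.file.datalake.implementation.util.DataLakeImplUtils.
    static RuntimeException transformBlobStorageException(Throwable ex) {
        if (ex instanceof BlobStorageException) {
            BlobStorageException blobEx = (BlobStorageException) ex;
            // Assumed constructor: (service message, raw HTTP response, deserialized error value).
            return new DataLakeStorageException(blobEx.getServiceMessage(),
                blobEx.getResponse(), blobEx.getValue());
        }
        // Leave unrelated errors untouched so their original type is preserved.
        return ex instanceof RuntimeException ? (RuntimeException) ex : new RuntimeException(ex);
    }
}
```

Returning a RuntimeException would keep both call sites in the diff consistent: the async clients can pass the method as a Function<Throwable, Throwable> to onErrorMap, and the sync clients can rethrow the result through ClientLogger.logExceptionAsError.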
@@ -9,9 +9,11 @@
import com.azure.core.util.logging.ClientLogger;
import com.azure.storage.blob.BlobAsyncClient;
import com.azure.storage.blob.models.BlobDownloadResponse;
import com.azure.storage.blob.models.BlobStorageException;
import com.azure.storage.blob.specialized.BlockBlobClient;
import com.azure.storage.common.Utility;
import com.azure.storage.common.implementation.StorageImplUtils;
import com.azure.storage.file.datalake.implementation.util.DataLakeImplUtils;
import com.azure.storage.file.datalake.models.DataLakeRequestConditions;
import com.azure.storage.file.datalake.models.DownloadRetryOptions;
import com.azure.storage.file.datalake.models.FileRange;
@@ -276,10 +278,14 @@ public void read(OutputStream stream) {
*/
public FileReadResponse readWithResponse(OutputStream stream, FileRange range, DownloadRetryOptions options,
DataLakeRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) {
BlobDownloadResponse response = blockBlobClient.downloadWithResponse(stream, Transforms.toBlobRange(range),
Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions),
getRangeContentMd5, timeout, context);
return Transforms.toFileReadResponse(response);
try {
BlobDownloadResponse response = blockBlobClient.downloadWithResponse(stream, Transforms.toBlobRange(range),
Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions),
getRangeContentMd5, timeout, context);
return Transforms.toFileReadResponse(response);
} catch (BlobStorageException ex) {
throw logger.logExceptionAsError(DataLakeImplUtils.transformBlobStorageException(ex));
}
}

/**
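For the synchronous file client this means read failures now also arrive as DataLakeStorageException. A hypothetical verification sketch (there is no such test in this diff; the caller supplies a DataLakeFileClient that points at a path known not to exist):

```java
import static org.junit.jupiter.api.Assertions.assertThrows;

import java.io.ByteArrayOutputStream;

import com.azure.storage.file.datalake.DataLakeFileClient;
import com.azure.storage.file.datalake.models.DataLakeStorageException;

final class ReadErrorMappingCheck {
    // Call with a client for a file that does not exist on the service.
    static void assertMappedException(DataLakeFileClient missingFileClient) {
        // Before this change the same call could surface BlobStorageException.
        assertThrows(DataLakeStorageException.class,
            () -> missingFileClient.read(new ByteArrayOutputStream()));
    }
}
```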
@@ -22,6 +22,7 @@
import com.azure.storage.file.datalake.implementation.DataLakeStorageClientImpl;
import com.azure.storage.file.datalake.implementation.models.FileSystemsListPathsResponse;
import com.azure.storage.file.datalake.implementation.models.Path;
import com.azure.storage.file.datalake.implementation.util.DataLakeImplUtils;
import com.azure.storage.file.datalake.models.DataLakeRequestConditions;
import com.azure.storage.file.datalake.models.DataLakeSignedIdentifier;
import com.azure.storage.file.datalake.models.FileSystemAccessPolicies;
@@ -241,7 +242,8 @@ public Mono<Void> create() {
*/
public Mono<Response<Void>> createWithResponse(Map<String, String> metadata, PublicAccessType accessType) {
try {
return blobContainerAsyncClient.createWithResponse(metadata, Transforms.toBlobPublicAccessType(accessType));
return blobContainerAsyncClient.createWithResponse(metadata, Transforms.toBlobPublicAccessType(accessType))
.onErrorMap(DataLakeImplUtils::transformBlobStorageException);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
@@ -283,7 +285,8 @@ public Mono<Void> delete() {
public Mono<Response<Void>> deleteWithResponse(DataLakeRequestConditions requestConditions) {
try {
return blobContainerAsyncClient.deleteWithResponse(
Transforms.toBlobRequestConditions(requestConditions));
Transforms.toBlobRequestConditions(requestConditions))
.onErrorMap(DataLakeImplUtils::transformBlobStorageException);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
@@ -322,6 +325,7 @@ public Mono<FileSystemProperties> getProperties() {
public Mono<Response<FileSystemProperties>> getPropertiesWithResponse(String leaseId) {
try {
return blobContainerAsyncClient.getPropertiesWithResponse(leaseId)
.onErrorMap(DataLakeImplUtils::transformBlobStorageException)
.map(response -> new SimpleResponse<>(response,
Transforms.toFileSystemProperties(response.getValue())));
} catch (RuntimeException ex) {
@@ -368,7 +372,8 @@ public Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata
DataLakeRequestConditions requestConditions) {
try {
return blobContainerAsyncClient.setMetadataWithResponse(metadata,
Transforms.toBlobRequestConditions(requestConditions));
Transforms.toBlobRequestConditions(requestConditions))
.onErrorMap(DataLakeImplUtils::transformBlobStorageException);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
@@ -664,7 +669,8 @@ public Mono<Response<Void>> setAccessPolicyWithResponse(PublicAccessType accessT
List<DataLakeSignedIdentifier> identifiers, DataLakeRequestConditions requestConditions) {
try {
return blobContainerAsyncClient.setAccessPolicyWithResponse(Transforms.toBlobPublicAccessType(accessType),
Transforms.toBlobIdentifierList(identifiers), Transforms.toBlobRequestConditions(requestConditions));
Transforms.toBlobIdentifierList(identifiers), Transforms.toBlobRequestConditions(requestConditions))
.onErrorMap(DataLakeImplUtils::transformBlobStorageException);
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
@@ -704,6 +710,7 @@ public Mono<FileSystemAccessPolicies> getAccessPolicy() {
public Mono<Response<FileSystemAccessPolicies>> getAccessPolicyWithResponse(String leaseId) {
try {
return blobContainerAsyncClient.getAccessPolicyWithResponse(leaseId)
.onErrorMap(DataLakeImplUtils::transformBlobStorageException)
.map(response -> new SimpleResponse<>(response,
Transforms.toFileSystemAccessPolicies(response.getValue())));
} catch (RuntimeException ex) {
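On the async file-system client the mapping is applied with onErrorMap, so Reactor subscribers only ever see the mapped type in their error signal. A small consumer-side sketch (not part of the PR; falling back to an empty Mono is just an example policy):

```java
import com.azure.storage.file.datalake.DataLakeFileSystemAsyncClient;
import com.azure.storage.file.datalake.models.DataLakeStorageException;
import com.azure.storage.file.datalake.models.FileSystemProperties;
import reactor.core.publisher.Mono;

final class AsyncHandlingSample {
    static Mono<FileSystemProperties> propertiesOrEmpty(DataLakeFileSystemAsyncClient client) {
        return client.getProperties()
            // With this change the error signal carries DataLakeStorageException,
            // not the underlying BlobStorageException.
            .onErrorResume(DataLakeStorageException.class, ex -> Mono.empty());
    }
}
```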
@@ -13,7 +13,9 @@
import com.azure.storage.blob.BlobContainerClient;
import com.azure.storage.blob.models.BlobContainerAccessPolicies;
import com.azure.storage.blob.models.BlobContainerProperties;
import com.azure.storage.blob.models.BlobStorageException;
import com.azure.storage.common.StorageSharedKeyCredential;
import com.azure.storage.file.datalake.implementation.util.DataLakeImplUtils;
import com.azure.storage.file.datalake.models.DataLakeRequestConditions;
import com.azure.storage.file.datalake.models.DataLakeSignedIdentifier;
import com.azure.storage.file.datalake.models.FileSystemAccessPolicies;
@@ -195,8 +197,12 @@ public void create() {
*/
public Response<Void> createWithResponse(Map<String, String> metadata, PublicAccessType accessType,
Duration timeout, Context context) {
return blobContainerClient.createWithResponse(metadata, Transforms.toBlobPublicAccessType(accessType), timeout,
context);
try {
return blobContainerClient.createWithResponse(metadata, Transforms.toBlobPublicAccessType(accessType),
timeout, context);
} catch (BlobStorageException ex) {
throw logger.logExceptionAsError(DataLakeImplUtils.transformBlobStorageException(ex));
}
}

/**
@@ -228,8 +234,12 @@ public void delete() {
*/
public Response<Void> deleteWithResponse(DataLakeRequestConditions requestConditions, Duration timeout,
Context context) {
return blobContainerClient.deleteWithResponse(Transforms.toBlobRequestConditions(requestConditions),
timeout, context);
try {
return blobContainerClient.deleteWithResponse(Transforms.toBlobRequestConditions(requestConditions),
timeout, context);
} catch (BlobStorageException ex) {
throw logger.logExceptionAsError(DataLakeImplUtils.transformBlobStorageException(ex));
}
}

/**
@@ -260,9 +270,13 @@ public FileSystemProperties getProperties() {
* @return A response containing the file system properties.
*/
public Response<FileSystemProperties> getPropertiesWithResponse(String leaseId, Duration timeout, Context context) {
Response<BlobContainerProperties> response = blobContainerClient.getPropertiesWithResponse(leaseId, timeout,
context);
return new SimpleResponse<>(response, Transforms.toFileSystemProperties(response.getValue()));
try {
Response<BlobContainerProperties> response = blobContainerClient.getPropertiesWithResponse(leaseId, timeout,
context);
return new SimpleResponse<>(response, Transforms.toFileSystemProperties(response.getValue()));
} catch (BlobStorageException ex) {
throw logger.logExceptionAsError(DataLakeImplUtils.transformBlobStorageException(ex));
}
}

/**
@@ -294,8 +308,12 @@ public void setMetadata(Map<String, String> metadata) {
*/
public Response<Void> setMetadataWithResponse(Map<String, String> metadata,
DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
return blobContainerClient.setMetadataWithResponse(metadata,
Transforms.toBlobRequestConditions(requestConditions), timeout, context);
try {
return blobContainerClient.setMetadataWithResponse(metadata,
Transforms.toBlobRequestConditions(requestConditions), timeout, context);
} catch (BlobStorageException ex) {
throw logger.logExceptionAsError(DataLakeImplUtils.transformBlobStorageException(ex));
}
}

/**
@@ -523,9 +541,13 @@ public FileSystemAccessPolicies getAccessPolicy() {
*/
public Response<FileSystemAccessPolicies> getAccessPolicyWithResponse(String leaseId, Duration timeout,
Context context) {
Response<BlobContainerAccessPolicies> response = blobContainerClient.getAccessPolicyWithResponse(leaseId,
timeout, context);
return new SimpleResponse<>(response, Transforms.toFileSystemAccessPolicies(response.getValue()));
try {
Response<BlobContainerAccessPolicies> response = blobContainerClient.getAccessPolicyWithResponse(leaseId,
timeout, context);
return new SimpleResponse<>(response, Transforms.toFileSystemAccessPolicies(response.getValue()));
} catch (BlobStorageException ex) {
throw logger.logExceptionAsError(DataLakeImplUtils.transformBlobStorageException(ex));
}
}

/**
@@ -575,10 +597,14 @@ public void setAccessPolicy(PublicAccessType accessType, List<DataLakeSignedIden
public Response<Void> setAccessPolicyWithResponse(PublicAccessType accessType,
List<DataLakeSignedIdentifier> identifiers, DataLakeRequestConditions requestConditions,
Duration timeout, Context context) {
return blobContainerClient
.setAccessPolicyWithResponse(Transforms.toBlobPublicAccessType(accessType),
Transforms.toBlobIdentifierList(identifiers), Transforms.toBlobRequestConditions(requestConditions),
timeout, context);
try {
return blobContainerClient
.setAccessPolicyWithResponse(Transforms.toBlobPublicAccessType(accessType),
Transforms.toBlobIdentifierList(identifiers), Transforms.toBlobRequestConditions(requestConditions),
timeout, context);
} catch (BlobStorageException ex) {
throw logger.logExceptionAsError(DataLakeImplUtils.transformBlobStorageException(ex));
}
}

BlobContainerClient getBlobContainerClient() {
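The same four-line try/catch now appears in every synchronous method this PR touches, here and in DataLakePathClient below. A possible follow-up, offered purely as a sketch for discussion (the helper name is hypothetical), would fold the pattern into a shared Supplier-based wrapper:

```java
import java.util.function.Supplier;

import com.azure.core.util.logging.ClientLogger;
import com.azure.storage.blob.models.BlobStorageException;
import com.azure.storage.file.datalake.implementation.util.DataLakeImplUtils;

final class SyncErrorMappingHelper {
    // Hypothetical helper mirroring the inline pattern used throughout this PR.
    static <T> T callAndMapException(Supplier<T> call, ClientLogger logger) {
        try {
            return call.get();
        } catch (BlobStorageException ex) {
            throw logger.logExceptionAsError(DataLakeImplUtils.transformBlobStorageException(ex));
        }
    }
}
```

A call site such as createWithResponse would then reduce to a one-liner that wraps the existing blobContainerClient call in callAndMapException.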
@@ -13,6 +13,7 @@
import com.azure.storage.blob.BlobContainerAsyncClient;
import com.azure.storage.blob.BlobServiceVersion;
import com.azure.storage.blob.BlobUrlParts;
import com.azure.storage.blob.models.BlobStorageException;
import com.azure.storage.blob.specialized.BlockBlobAsyncClient;
import com.azure.storage.blob.specialized.SpecializedBlobClientBuilder;
import com.azure.storage.common.StorageSharedKeyCredential;
@@ -324,7 +325,8 @@ public Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata
DataLakeRequestConditions requestConditions) {
try {
return this.blockBlobAsyncClient.setMetadataWithResponse(metadata,
Transforms.toBlobRequestConditions(requestConditions));
Transforms.toBlobRequestConditions(requestConditions))
.onErrorMap(ex -> DataLakeImplUtils.transformBlobStorageException((BlobStorageException) ex));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
@@ -371,7 +373,8 @@ public Mono<Response<Void>> setHttpHeadersWithResponse(PathHttpHeaders headers,
DataLakeRequestConditions requestConditions) {
try {
return this.blockBlobAsyncClient.setHttpHeadersWithResponse(Transforms.toBlobHttpHeaders(headers),
Transforms.toBlobRequestConditions(requestConditions));
Transforms.toBlobRequestConditions(requestConditions))
.onErrorMap(ex -> DataLakeImplUtils.transformBlobStorageException((BlobStorageException) ex));
} catch (RuntimeException ex) {
return monoError(logger, ex);
}
@@ -413,6 +416,7 @@ public Mono<PathProperties> getProperties() {
public Mono<Response<PathProperties>> getPropertiesWithResponse(DataLakeRequestConditions requestConditions) {
try {
return blockBlobAsyncClient.getPropertiesWithResponse(Transforms.toBlobRequestConditions(requestConditions))
.onErrorMap(ex -> DataLakeImplUtils.transformBlobStorageException((BlobStorageException) ex))
.map(response -> new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue())));
} catch (RuntimeException ex) {
return monoError(logger, ex);
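Unlike the file and file-system async clients above, the onErrorMap calls in this file cast the incoming Throwable to BlobStorageException inside the lambda; if any other error ever reaches the operator, the cast fails with a ClassCastException instead of propagating the original failure. A defensive alternative, sketched only as a suggestion (the guarded helper below is hypothetical):

```java
import com.azure.storage.blob.models.BlobStorageException;
import com.azure.storage.file.datalake.implementation.util.DataLakeImplUtils;

final class GuardedErrorMapping {
    // Map storage failures, but let every other error keep its original type.
    static Throwable mapIfBlobError(Throwable ex) {
        return ex instanceof BlobStorageException
            ? DataLakeImplUtils.transformBlobStorageException((BlobStorageException) ex)
            : ex;
    }
}
```

Alternatively, reuse the DataLakeImplUtils::transformBlobStorageException method reference exactly as DataLakeFileSystemAsyncClient does earlier in this diff, if the helper already accepts a Throwable.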
@@ -9,13 +9,15 @@
import com.azure.core.util.Context;
import com.azure.core.util.logging.ClientLogger;
import com.azure.storage.blob.models.BlobProperties;
import com.azure.storage.blob.models.BlobStorageException;
import com.azure.storage.blob.specialized.BlockBlobClient;
import com.azure.storage.common.StorageSharedKeyCredential;
import com.azure.storage.common.implementation.StorageImplUtils;
import com.azure.storage.file.datalake.implementation.models.LeaseAccessConditions;
import com.azure.storage.file.datalake.implementation.models.ModifiedAccessConditions;
import com.azure.storage.file.datalake.implementation.models.PathRenameMode;
import com.azure.storage.file.datalake.implementation.models.SourceModifiedAccessConditions;
import com.azure.storage.file.datalake.implementation.util.DataLakeImplUtils;
import com.azure.storage.file.datalake.models.DataLakeRequestConditions;
import com.azure.storage.file.datalake.models.PathAccessControl;
import com.azure.storage.file.datalake.models.PathAccessControlEntry;
@@ -192,8 +194,12 @@ public void setMetadata(Map<String, String> metadata) {
*/
public Response<Void> setMetadataWithResponse(Map<String, String> metadata,
DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
return blockBlobClient.setMetadataWithResponse(metadata, Transforms.toBlobRequestConditions(requestConditions),
timeout, context);
try {
return blockBlobClient.setMetadataWithResponse(metadata,
Transforms.toBlobRequestConditions(requestConditions), timeout, context);
} catch (BlobStorageException ex) {
throw logger.logExceptionAsError(DataLakeImplUtils.transformBlobStorageException(ex));
}
}

/**
@@ -232,8 +238,12 @@ public void setHttpHeaders(PathHttpHeaders headers) {
*/
public Response<Void> setHttpHeadersWithResponse(PathHttpHeaders headers,
DataLakeRequestConditions requestConditions, Duration timeout, Context context) {
return blockBlobClient.setHttpHeadersWithResponse(Transforms.toBlobHttpHeaders(headers),
Transforms.toBlobRequestConditions(requestConditions), timeout, context);
try {
return blockBlobClient.setHttpHeadersWithResponse(Transforms.toBlobHttpHeaders(headers),
Transforms.toBlobRequestConditions(requestConditions), timeout, context);
} catch (BlobStorageException ex) {
throw logger.logExceptionAsError(DataLakeImplUtils.transformBlobStorageException(ex));
}
}

/**
@@ -400,9 +410,13 @@ public PathProperties getProperties() {
*/
public Response<PathProperties> getPropertiesWithResponse(DataLakeRequestConditions requestConditions,
Duration timeout, Context context) {
Response<BlobProperties> response = blockBlobClient.getPropertiesWithResponse(
Transforms.toBlobRequestConditions(requestConditions), timeout, context);
return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue()));
try {
Response<BlobProperties> response = blockBlobClient.getPropertiesWithResponse(
Transforms.toBlobRequestConditions(requestConditions), timeout, context);
return new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue()));
} catch (BlobStorageException ex) {
throw logger.logExceptionAsError(DataLakeImplUtils.transformBlobStorageException(ex));
}
}

/**