diff --git a/sdk/storage/Azure.Storage.Common/src/Constants.cs b/sdk/storage/Azure.Storage.Common/src/Constants.cs
index e535c7113b64..15171e50a13d 100644
--- a/sdk/storage/Azure.Storage.Common/src/Constants.cs
+++ b/sdk/storage/Azure.Storage.Common/src/Constants.cs
@@ -355,6 +355,37 @@ internal static class Share
}
}
+ ///
+ /// Data Lake constant values
+ ///
+ internal static class DataLake
+ {
+ ///
+ /// The blob URI suffix.
+ ///
+ public const string BlobUriSuffix = "blob";
+
+ ///
+ /// The DFS URI suffix.
+ ///
+ public const string DfsUriSuffix = "dfs";
+
+ ///
+ /// The key of the JSON object returned for errors.
+ ///
+ public const string ErrorKey = "error";
+
+ ///
+ /// The key of the error code returned for errors.
+ ///
+ public const string ErrorCodeKey = "code";
+
+ ///
+ /// The key of the error message returned for errors.
+ ///
+ public const string ErrorMessageKey = "message";
+ }
+
///
/// Queue constant values
///
diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/StorageTestBase.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/StorageTestBase.cs
index 477359727622..7e1af04895db 100644
--- a/sdk/storage/Azure.Storage.Common/tests/Shared/StorageTestBase.cs
+++ b/sdk/storage/Azure.Storage.Common/tests/Shared/StorageTestBase.cs
@@ -61,6 +61,14 @@ public StorageTestBase(bool async, RecordedTestMode? mode = null)
"Storage_TestConfigOAuth",
() => TestConfigurations.DefaultTargetOAuthTenant);
+ ///
+ /// Gets the tenant to use for any tests that require a
+ /// hierarchical namespace.
+ ///
+ public TenantConfiguration TestConfigHierarchicalNamespace => GetTestConfig(
+ "Storage_TestConfigHierarchicalNamespace",
+ () => TestConfigurations.DefaultTargetHierarchicalNamespaceTenant);
+
///
/// Gets a cache used for storing serialized tenant configurations. Do
/// not get values from this directly; use GetTestConfig.
@@ -186,7 +194,7 @@ public TokenCredential GetOAuthCredential(string tenantId, string appId, string
Recording.InstrumentClientOptions(
new TokenCredentialOptions() { AuthorityHost = authorityHost }));
- public void AssertMetadataEquality(IDictionary expected, IDictionary actual)
+ public virtual void AssertMetadataEquality(IDictionary expected, IDictionary actual)
{
Assert.IsNotNull(expected, "Expected metadata is null");
Assert.IsNotNull(actual, "Actual metadata is null");
diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/TestConfigurations.cs b/sdk/storage/Azure.Storage.Common/tests/Shared/TestConfigurations.cs
index 5bb1424b849c..8932e346f662 100644
--- a/sdk/storage/Azure.Storage.Common/tests/Shared/TestConfigurations.cs
+++ b/sdk/storage/Azure.Storage.Common/tests/Shared/TestConfigurations.cs
@@ -66,6 +66,12 @@ public class TestConfigurations
///
private string TargetOAuthTenantName { get; set; }
+ ///
+ /// Gets the name of the tenant in the Tenants dictionary to use for
+ /// any tests that require hierarchical namespace.
+ ///
+ private string TargetHierarchicalNamespaceTenantName { get; set; }
+
///
/// Gets the tenant to use by default for our tests.
///
@@ -98,6 +104,12 @@ public class TestConfigurations
public static TenantConfiguration DefaultTargetOAuthTenant =>
GetTenant("TargetOAuthTenant", s_configurations.Value.TargetOAuthTenantName);
+ ///
+ /// Gets a tenant to use for any tests that require hierarchical namespace
+ ///
+ public static TenantConfiguration DefaultTargetHierarchicalNamespaceTenant =>
+ GetTenant("TargetHierarchicalNamespaceTenant", s_configurations.Value.TargetHierarchicalNamespaceTenantName);
+
///
/// When loading our test configuration, we'll check the
/// AZ_STORAGE_CONFIG_PATH first.
@@ -193,6 +205,7 @@ private static TestConfigurations ReadFromXml(XDocument doc)
TargetPremiumBlobTenantName = Get("TargetPremiumBlobTenant"),
TargetPreviewBlobTenantName = Get("TargetPreviewBlobTenant"),
TargetOAuthTenantName = Get("TargetOAuthTenant"),
+ TargetHierarchicalNamespaceTenantName = Get("TargetHierarchicalNamespaceTenant"),
Tenants =
config.Element("TenantConfigurations").Elements("TenantConfiguration")
.Select(TenantConfiguration.Parse)
diff --git a/sdk/storage/Azure.Storage.Common/tests/Shared/TestConfigurationsTemplate.xml b/sdk/storage/Azure.Storage.Common/tests/Shared/TestConfigurationsTemplate.xml
index 49d655607812..e1d4ad31476b 100644
--- a/sdk/storage/Azure.Storage.Common/tests/Shared/TestConfigurationsTemplate.xml
+++ b/sdk/storage/Azure.Storage.Common/tests/Shared/TestConfigurationsTemplate.xml
@@ -4,6 +4,7 @@
SecondaryTenant
NotInPreview
OAuthTenant
+ NamespaceTenant
@@ -23,6 +24,7 @@
DefaultEndpointsProtocol=https;AccountName=[ACCOUNT];AccountKey=[ACCOUNT-KEY];EndpointSuffix=core.windows.net
+
SecondaryTenant
@@ -39,16 +41,19 @@
http://[RAGRS-ACCOUNT]-secondary.table.core.windows.net
+
OAuthTenant
Cloud
[OAUTH-ACCOUNT]
[OAUTH-ACCOUNT-KEY]
+
[ActiveDirectoryApplicationId]
[ActiveDirectoryApplicationSecret]
[ActiveDirectoryTenantId]
https://login.microsoftonline.com/
+
http://[OAUTH-ACCOUNT].blob.core.windows.net
http://[OAUTH-ACCOUNT].queue.core.windows.net
http://[OAUTH-ACCOUNT].table.core.windows.net
@@ -59,7 +64,8 @@
http://[OAUTH-ACCOUNT]-secondary.table.core.windows.net
-
+
+
PremiumBlobTenant
Cloud
@@ -74,5 +80,28 @@
http://[PREMIUM-ACCOUNT]-secondary.file.core.windows.net
http://[PREMIUM-ACCOUNT]-secondary.table.core.windows.net
+
+
+
+
+ NamespaceTenant
+ Cloud
+ [NAMESPACE-ACCOUNT]
+ [NAMESPACE-ACCOUNT-KEY]
+
+ [ActiveDirectoryApplicationId]
+ [ActiveDirectoryApplicationSecret]
+ [ActiveDirectoryTenantId]
+ https://login.microsoftonline.com/
+
+ http://[NAMESPACE-ACCOUNT].blob.core.windows.net
+ http://[NAMESPACE-ACCOUNT].queue.core.windows.net
+ http://[NAMESPACE-ACCOUNT].table.core.windows.net
+ http://[NAMESPACE-ACCOUNT].file.core.windows.net
+ http://[NAMESPACE-ACCOUNT]-secondary.blob.core.windows.net
+ http://[NAMESPACE-ACCOUNT]-secondary.queue.core.windows.net
+ http://[NAMESPACE-ACCOUNT]-secondary.file.core.windows.net
+ http://[NAMESPACE-ACCOUNT]-secondary.table.core.windows.net
+
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/AssemblyInfo.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/AssemblyInfo.cs
index 8f74f7f8b85a..ab700ce5bdf8 100644
--- a/sdk/storage/Azure.Storage.Files.DataLake/src/AssemblyInfo.cs
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/AssemblyInfo.cs
@@ -9,3 +9,4 @@
"012ea67d2479751a0b8c087a4185541b851bd8b16f8d91b840e51b1cb0ba6fe647997e57429265" +
"e85ef62d565db50a69ae1647d54d7bd855e4db3d8a91510e5bcbd0edfbbecaa20a7bd9ae74593d" +
"aa7b11b4")]
+[assembly: InternalsVisibleTo("DynamicProxyGenAssembly2, PublicKey=0024000004800000940000000602000000240000525341310004000001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734aa39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be11e6a7d3113e92484cf7045cc7")]
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj b/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj
index ee2eb6b0495a..598d352aac9c 100644
--- a/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Azure.Storage.Files.DataLake.csproj
@@ -18,7 +18,11 @@
REST API Reference for Data Lake - https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/filesystem
+
+
+
+
\ No newline at end of file
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/BlobContainerItemExtensions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/BlobContainerItemExtensions.cs
new file mode 100644
index 000000000000..eb01450e77ad
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/BlobContainerItemExtensions.cs
@@ -0,0 +1,33 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using Azure.Storage.Blobs.Models;
+using Azure.Storage.Files.DataLake.Models;
+
+namespace Azure.Storage.Files.DataLake
+{
+ internal static class BlobContainerItemExtensions
+ {
+ internal static FileSystemItem ToFileSystemItem(this BlobContainerItem containerItem) =>
+ new FileSystemItem()
+ {
+ Name = containerItem.Name,
+ Properties = new FileSystemProperties()
+ {
+ LastModified = containerItem.Properties.LastModified,
+ LeaseStatus = containerItem.Properties.LeaseStatus.HasValue
+ ? (Models.LeaseStatus)containerItem.Properties.LeaseStatus : default,
+ LeaseState = containerItem.Properties.LeaseState.HasValue
+ ? (Models.LeaseState)containerItem.Properties.LeaseState : default,
+ LeaseDuration = containerItem.Properties.LeaseDuration.HasValue
+ ? (Models.LeaseDurationType)containerItem.Properties.LeaseDuration : default,
+ PublicAccess = containerItem.Properties.PublicAccess.HasValue
+ ? (Models.PublicAccessType)containerItem.Properties.PublicAccess : default,
+ HasImmutabilityPolicy = containerItem.Properties.HasImmutabilityPolicy,
+ HasLegalHold = containerItem.Properties.HasLegalHold,
+ ETag = containerItem.Properties.ETag
+ },
+ Metadata = containerItem.Metadata
+ };
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/BlobDownloadDetailsExtensions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/BlobDownloadDetailsExtensions.cs
new file mode 100644
index 000000000000..bca81f9bb632
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/BlobDownloadDetailsExtensions.cs
@@ -0,0 +1,35 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using Azure.Storage.Blobs.Models;
+using Azure.Storage.Files.DataLake.Models;
+
+namespace Azure.Storage.Files.DataLake
+{
+ internal static class BlobDownloadDetailsExtensions
+ {
+ internal static FileDownloadDetails ToFileDownloadDetails(this BlobDownloadDetails blobDownloadProperties) =>
+ new FileDownloadDetails()
+ {
+ LastModified = blobDownloadProperties.LastModified,
+ Metadata = blobDownloadProperties.Metadata,
+ ContentRange = blobDownloadProperties.ContentRange,
+ ETag = blobDownloadProperties.ETag,
+ ContentEncoding = blobDownloadProperties.ContentEncoding,
+ ContentDisposition = blobDownloadProperties.ContentDisposition,
+ ContentLanguage = blobDownloadProperties.ContentLanguage,
+ CopyCompletedOn = blobDownloadProperties.CopyCompletedOn,
+ CopyStatusDescription = blobDownloadProperties.CopyStatusDescription,
+ CopyId = blobDownloadProperties.CopyId,
+ CopyProgress = blobDownloadProperties.CopyProgress,
+ CopyStatus = (Models.CopyStatus)blobDownloadProperties.CopyStatus,
+ LeaseDuration = (Models.LeaseDurationType)blobDownloadProperties.LeaseDuration,
+ LeaseState = (Models.LeaseState)blobDownloadProperties.LeaseState,
+ LeaseStatus = (Models.LeaseStatus)blobDownloadProperties.LeaseStatus,
+ AcceptRanges = blobDownloadProperties.AcceptRanges,
+ IsServerEncrypted = blobDownloadProperties.IsServerEncrypted,
+ EncryptionKeySha256 = blobDownloadProperties.EncryptionKeySha256,
+ ContentHash = blobDownloadProperties.BlobContentHash
+ };
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/BlobDownloadInfoExtensions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/BlobDownloadInfoExtensions.cs
new file mode 100644
index 000000000000..89468d44f740
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/BlobDownloadInfoExtensions.cs
@@ -0,0 +1,20 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using Azure.Storage.Blobs.Models;
+using Azure.Storage.Files.DataLake.Models;
+
+namespace Azure.Storage.Files.DataLake
+{
+ internal static class BlobDownloadInfoExtensions
+ {
+ internal static FileDownloadInfo ToFileDownloadInfo(this BlobDownloadInfo blobDownloadInfo) =>
+ new FileDownloadInfo()
+ {
+ ContentLength = blobDownloadInfo.ContentLength,
+ Content = blobDownloadInfo.Content,
+ ContentHash = blobDownloadInfo.ContentHash,
+ Properties = blobDownloadInfo.Details.ToFileDownloadDetails()
+ };
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/BlobPropertiesExtensions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/BlobPropertiesExtensions.cs
new file mode 100644
index 000000000000..462a0a79e45f
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/BlobPropertiesExtensions.cs
@@ -0,0 +1,42 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using Azure.Storage.Blobs.Models;
+using Azure.Storage.Files.DataLake.Models;
+
+namespace Azure.Storage.Files.DataLake
+{
+ internal static class BlobPropertiesExtensions
+ {
+ internal static PathProperties ToPathProperties(this BlobProperties blobProperties) =>
+ new PathProperties()
+ {
+ LastModified = blobProperties.LastModified,
+ CreatedOn = blobProperties.CreatedOn,
+ Metadata = blobProperties.Metadata,
+ CopyCompletedOn = blobProperties.CopyCompletedOn,
+ CopyStatusDescription = blobProperties.CopyStatusDescription,
+ CopyId = blobProperties.CopyId,
+ CopyProgress = blobProperties.CopyProgress,
+ CopySource = blobProperties.CopySource,
+ IsIncrementalCopy = blobProperties.IsIncrementalCopy,
+ LeaseDuration = (Models.LeaseDurationType)blobProperties.LeaseDuration,
+ LeaseStatus = (Models.LeaseStatus)blobProperties.LeaseStatus,
+ LeaseState = (Models.LeaseState)blobProperties.LeaseState,
+ ContentLength = blobProperties.ContentLength,
+ ContentType = blobProperties.ContentType,
+ ETag = blobProperties.ETag,
+ ContentHash = blobProperties.ContentHash,
+ ContentEncoding = blobProperties.ContentEncoding,
+ ContentDisposition = blobProperties.ContentDisposition,
+ ContentLanguage = blobProperties.ContentLanguage,
+ CacheControl = blobProperties.CacheControl,
+ AcceptRanges = blobProperties.AcceptRanges,
+ IsServerEncrypted = blobProperties.IsServerEncrypted,
+ EncryptionKeySha256 = blobProperties.EncryptionKeySha256,
+ AccessTier = blobProperties.AccessTier,
+ ArchiveStatus = blobProperties.ArchiveStatus,
+ AccessTierChangedOn = blobProperties.AccessTierChangedOn
+ };
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeClientOptions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeClientOptions.cs
new file mode 100644
index 000000000000..918b0aacdf51
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeClientOptions.cs
@@ -0,0 +1,19 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using Azure.Core;
+using Azure.Storage.Blobs;
+using Azure.Storage.Files.DataLake.Models;
+
+namespace Azure.Storage.Files.DataLake
+{
+#pragma warning disable AZC0008 // ClientOptions should have a nested enum called ServiceVersion
+ ///
+ /// Provides the client configuration options for connecting to Azure Data Lake service.
+ ///
+ public class DataLakeClientOptions : BlobClientOptions
+#pragma warning restore AZC0008 // ClientOptions should have a nested enum called ServiceVersion
+ {
+
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeLeaseClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeLeaseClient.cs
new file mode 100644
index 000000000000..906c22eeb32c
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeLeaseClient.cs
@@ -0,0 +1,571 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+using Azure.Core;
+using Azure.Storage.Blobs;
+using Azure.Storage.Blobs.Specialized;
+using Azure.Storage.Files.DataLake.Models;
+
+namespace Azure.Storage.Files.DataLake
+{
+ ///
+ /// The allows you to manipulate Azure
+ /// Storage leases on paths.
+ ///
+ public class DataLakeLeaseClient
+ {
+ ///
+ /// Blob lease client for managing leases.
+ ///
+ private readonly BlobLeaseClient _blobLeaseClient;
+
+ ///
+ /// The representing an infinite lease duration.
+ ///
+ public static readonly TimeSpan InfiniteLeaseDuration = TimeSpan.FromSeconds(Constants.Blob.Lease.InfiniteLeaseDuration);
+
+ ///
+ /// Gets the URI of the object being leased.
+ ///
+ public Uri Uri => _blobLeaseClient.Uri;
+
+ ///
+ /// Gets the Lease ID for this lease.
+ ///
+ public virtual string LeaseId => _blobLeaseClient.LeaseId;
+
+ #region ctors
+ ///
+ /// Initializes a new instance of the class
+ /// for mocking.
+ ///
+ protected DataLakeLeaseClient()
+ {
+ _blobLeaseClient = null;
+ }
+
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ ///
+ /// A representing the blob being leased.
+ ///
+ ///
+ /// An optional lease ID. If no lease ID is provided, a random lease
+ /// ID will be created.
+ ///
+ public DataLakeLeaseClient(PathClient client, string leaseId = null)
+ {
+ _blobLeaseClient = new Blobs.Specialized.BlobLeaseClient(client.BlobClient, leaseId);
+ }
+
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ ///
+ /// A representing the file system
+ /// being leased.
+ ///
+ ///
+ /// An optional lease ID. If no lease ID is provided, a random lease
+ /// ID will be created.
+ ///
+ public DataLakeLeaseClient(FileSystemClient client, string leaseId = null)
+ {
+ _blobLeaseClient = new Blobs.Specialized.BlobLeaseClient(client.ContainerClient, leaseId);
+ }
+ #endregion ctors
+
+ #region Acquire
+ ///
+ /// The operation acquires a lease on
+ /// the path or file system. The lease must
+ /// be between 15 and 60 seconds, or infinite (-1).
+ ///
+ /// If the file system does not have an active lease, the Data Lake service
+ /// creates a lease on the path or file system and returns it. If the
+ /// file system has an active lease, you can only request a new lease
+ /// using the active lease ID as , but you can
+ /// specify a new .
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Specifies the duration of the lease, in seconds, or specify
+ /// for a lease that never expires.
+ /// A non-infinite lease can be between 15 and 60 seconds.
+ /// A lease duration cannot be changed using or .
+ ///
+ ///
+ /// Optional to add
+ /// conditions on acquiring a lease.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the lease.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response Acquire(
+ TimeSpan duration,
+ RequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = _blobLeaseClient.Acquire(
+ duration,
+ conditions,
+ cancellationToken);
+
+ return Response.FromValue(
+ response.Value.ToDataLakeLease(),
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation acquires a lease on
+ /// the path or file system. The lease must
+ /// be between 15 and 60 seconds, or infinite (-1).
+ ///
+ /// If the file system does not have an active lease, the Data Lake service
+ /// creates a lease on the file system or path and returns it. If the
+ /// file system has an active lease, you can only request a new lease
+ /// using the active lease ID as , but you can
+ /// specify a new .
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Specifies the duration of the lease, in seconds, or specify
+ /// for a lease that never expires.
+ /// A non-infinite lease can be between 15 and 60 seconds.
+ /// A lease duration cannot be changed using or .
+ ///
+ ///
+ /// Optional to add
+ /// conditions on acquiring a lease.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the lease.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> AcquireAsync(
+ TimeSpan duration,
+ RequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = await _blobLeaseClient.AcquireAsync(
+ duration,
+ conditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ response.Value.ToDataLakeLease(),
+ response.GetRawResponse());
+ }
+
+ #endregion Acquire
+
+ #region Renew
+ ///
+ /// The operation renews the path or
+ /// file system's previously-acquired lease.
+ ///
+ /// The lease can be renewed if the leaseId
+ /// matches that associated with the path or file system. Note that the
+ /// lease may be renewed even if it has expired as long as the path or
+ /// file system has not been leased again since the expiration of that
+ /// lease. When you renew a lease, the lease duration clock resets.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional to add
+ /// conditions on acquiring a lease.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the lease.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response Renew(
+ RequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = _blobLeaseClient.Renew(
+ conditions,
+ cancellationToken);
+
+ return Response.FromValue(
+ response.Value.ToDataLakeLease(),
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation renews the path or
+ /// file system's previously-acquired lease.
+ ///
+ /// The lease can be renewed if the leaseId
+ /// matches that associated with the path or file system. Note that the
+ /// lease may be renewed even if it has expired as long as the path or
+ /// file system has not been leased again since the expiration of that
+ /// lease. When you renew a lease, the lease duration clock resets.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional to add
+ /// conditions on acquiring a lease.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the lease.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> RenewAsync(
+ RequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = await _blobLeaseClient.RenewAsync(
+ conditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ response.Value.ToDataLakeLease(),
+ response.GetRawResponse());
+ }
+ #endregion Renew
+
+ #region Release
+ ///
+ /// The operation releases the
+ /// file system or path's previously-acquired lease.
+ ///
+ /// The lease may be released if the
+ /// matches that associated with the file system or path. Releasing the
+ /// lease allows another client to immediately acquire the lease for the
+ /// file system or path as soon as the release is complete.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional to add
+ /// conditions on acquiring a lease.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// updated path or file system.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response Release(
+ RequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = _blobLeaseClient.Release(
+ conditions,
+ cancellationToken);
+
+ return Response.FromValue(
+ new ReleasedObjectInfo(response.Value.ETag, response.Value.LastModified),
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation releases the
+ /// file system or path's previously-acquired lease.
+ ///
+ /// The lease may be released if the
+ /// matches that associated with the file system or path. Releasing the
+ /// lease allows another client to immediately acquire the lease for the
+ /// file system or path as soon as the release is complete.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional to add
+ /// conditions on acquiring a lease.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// updated path or file system.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> ReleaseAsync(
+ RequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = await _blobLeaseClient.ReleaseAsync(
+ conditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ new ReleasedObjectInfo(response.Value.ETag, response.Value.LastModified),
+ response.GetRawResponse());
+ }
+ #endregion Release
+
+ #region Change
+ ///
+ /// The operation changes the lease
+ /// of an active lease. A change must include the current
+ /// and a new .
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// An optional proposed lease ID, in a GUID string format. A
+ /// will be thrown if the
+ /// proposed lease ID is not in the correct format.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on acquiring a lease.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the lease.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response Change(
+ string proposedId,
+ RequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = _blobLeaseClient.Change(
+ proposedId,
+ conditions,
+ cancellationToken);
+
+ return Response.FromValue(
+ response.Value.ToDataLakeLease(),
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation changes the lease
+ /// of an active lease. A change must include the current
+ /// and a new .
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// An optional proposed lease ID, in a GUID string format. A
+ /// will be thrown if the
+ /// proposed lease ID is not in the correct format.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on acquiring a lease.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the lease.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> ChangeAsync(
+ string proposedId,
+ RequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = await _blobLeaseClient.ChangeAsync(
+ proposedId,
+ conditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ response.Value.ToDataLakeLease(),
+ response.GetRawResponse());
+ }
+ #endregion Change
+
+ #region Break
+ ///
+ /// The operation breaks the path or
+ /// file system's previously-acquired lease (if it exists).
+ ///
+ /// Once a lease is broken, it cannot be renewed. Any authorized
+ /// request can break the lease; the request is not required to
+ /// specify a matching lease ID. When a lease is broken, the lease
+ /// break is allowed to elapse,
+ /// during which time no lease operation except
+ /// and can be
+ /// performed on the path or file system. When a lease is successfully
+ /// broken, the response indicates the interval in seconds until a new
+ /// lease can be acquired.
+ ///
+ /// A lease that has been broken can also be released. A client can
+ /// immediately acquire a path or file system lease that has been
+ /// released.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Specifies the proposed duration the lease should continue before
+ /// it is broken, in seconds, between 0 and 60. This break period is
+ /// only used if it is shorter than the time remaining on the lease.
+ /// If longer, the time remaining on the lease is used. A new lease
+ /// will not be available before the break period has expired, but the
+ /// lease may be held for longer than the break period. If this value
+ /// is not provided, a fixed-duration lease breaks after the remaining
+ /// lease period elapses, and an infinite lease breaks immediately.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on acquiring a lease.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the broken lease.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response Break(
+ TimeSpan? breakPeriod = default,
+ RequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = _blobLeaseClient.Break(
+ breakPeriod,
+ conditions,
+ cancellationToken);
+
+ return Response.FromValue(
+ response.Value.ToDataLakeLease(),
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation breaks the path or
+ /// file system's previously-acquired lease (if it exists).
+ ///
+ /// Once a lease is broken, it cannot be renewed. Any authorized
+ /// request can break the lease; the request is not required to
+ /// specify a matching lease ID. When a lease is broken, the lease
+ /// break is allowed to elapse,
+ /// during which time no lease operation except
+ /// and can be
+ /// performed on the path or file system. When a lease is successfully
+ /// broken, the response indicates the interval in seconds until a new
+ /// lease can be acquired.
+ ///
+ /// A lease that has been broken can also be released. A client can
+ /// immediately acquire a path or file system lease that has been
+ /// released.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Specifies the proposed duration the lease should continue before
+ /// it is broken, in seconds, between 0 and 60. This break period is
+ /// only used if it is shorter than the time remaining on the lease.
+ /// If longer, the time remaining on the lease is used. A new lease
+ /// will not be available before the break period has expired, but the
+ /// lease may be held for longer than the break period. If this value
+ /// is not provided, a fixed-duration lease breaks after the remaining
+ /// lease period elapses, and an infinite lease breaks immediately.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on acquiring a lease.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the broken lease.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> BreakAsync(
+ TimeSpan? breakPeriod = default,
+ RequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = await _blobLeaseClient.BreakAsync(
+ breakPeriod,
+ conditions,
+ cancellationToken).ConfigureAwait(false);
+
+ return Response.FromValue(
+ response.Value.ToDataLakeLease(),
+ response.GetRawResponse());
+ }
+ #endregion Break
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeLeaseClientExtensions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeLeaseClientExtensions.cs
new file mode 100644
index 000000000000..c48b7c331f59
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeLeaseClientExtensions.cs
@@ -0,0 +1,45 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using Azure.Storage.Blobs;
+
+namespace Azure.Storage.Files.DataLake
+{
+ ///
+ /// Add easy to discover methods to and
+ /// for easily creating
+ /// instances.
+ ///
+ public static partial class DataLakeLeaseClientExtensions
+ {
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ ///
+ /// A representing the path being leased.
+ ///
+ ///
+ /// An optional lease ID. If no lease ID is provided, a random lease
+ /// ID will be created.
+ ///
+ public static DataLakeLeaseClient GetLeaseClient(
+ this PathClient client,
+ string leaseId = null) =>
+ new DataLakeLeaseClient(client, leaseId);
+
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ ///
+ /// A representing the file system being leased.
+ ///
+ ///
+ /// An optional lease ID. If no lease ID is provided, a random lease
+ /// ID will be created.
+ ///
+ public static DataLakeLeaseClient GetLeaseClient(
+ this FileSystemClient client,
+ string leaseId = null) =>
+ new DataLakeLeaseClient(client, leaseId);
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeServiceClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeServiceClient.cs
new file mode 100644
index 000000000000..4dcdf28c69ed
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeServiceClient.cs
@@ -0,0 +1,567 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+using Azure.Core;
+using Azure.Core.Pipeline;
+using Azure.Storage.Blobs;
+using Azure.Storage.Files.DataLake.Models;
+using Metadata = System.Collections.Generic.IDictionary;
+
+namespace Azure.Storage.Files.DataLake
+{
+ ///
+ /// The allows you to manipulate Azure
+ /// Data Lake service resources and file systems. The storage account provides
+ /// the top-level namespace for the Data Lake service.
+ ///
+ public class DataLakeServiceClient
+ {
+ ///
+ /// The associated with the file system.
+ ///
+ private readonly BlobServiceClient _blobServiceClient;
+
+ ///
+ /// The Data Lake service's customer-provided endpoint.
+ ///
+ private readonly Uri _uri;
+
+ ///
+ /// The Data Lake service's blob endpoint.
+ ///
+ private readonly Uri _blobUri;
+
+ ///
+ /// Gets the Data Lake service's primary endpoint.
+ ///
+ public virtual Uri Uri => _uri;
+
+ ///
+ /// The transport pipeline used to send
+ /// every request.
+ ///
+ private readonly HttpPipeline _pipeline;
+
+ ///
+ /// Gets the transport pipeline used to
+ /// send every request.
+ ///
+ protected virtual HttpPipeline Pipeline => _pipeline;
+
+ ///
+ /// The instance used to create diagnostic scopes
+ /// every request.
+ ///
+ private readonly ClientDiagnostics _clientDiagnostics;
+
+ ///
+ /// The instance used to create diagnostic scopes
+ /// every request.
+ ///
+ internal virtual ClientDiagnostics ClientDiagnostics => _clientDiagnostics;
+
+ ///
+ /// The Storage account name corresponding to the file service client.
+ ///
+ private string _accountName;
+
+ ///
+ /// Gets the Storage account name corresponding to the file service client.
+ ///
+ public virtual string AccountName
+ {
+ get
+ {
+ if (_accountName == null)
+ {
+ _accountName = new DataLakeUriBuilder(Uri).AccountName;
+ }
+ return _accountName;
+ }
+ }
+
+ #region ctors
+ ///
+ /// Initializes a new instance of the
+ /// class for mocking.
+ ///
+ protected DataLakeServiceClient()
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the Data Lake service.
+ ///
+ ///
+ /// Optional client options that define the transport pipeline
+ /// policies for authentication, retries, etc., that are applied to
+ /// every request.
+ ///
+ public DataLakeServiceClient(Uri serviceUri, DataLakeClientOptions options = default)
+ : this(serviceUri, (HttpPipelinePolicy)null, options)
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the Data Lake service.
+ ///
+ ///
+ /// The shared key credential used to sign requests.
+ ///
+ ///
+ /// Optional client options that define the transport pipeline
+ /// policies for authentication, retries, etc., that are applied to
+ /// every request.
+ ///
+ public DataLakeServiceClient(Uri serviceUri, StorageSharedKeyCredential credential, DataLakeClientOptions options = default)
+ : this(serviceUri, credential.AsPolicy(), options)
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the Data Lake service.
+ ///
+ ///
+ /// The token credential used to sign requests.
+ ///
+ ///
+ /// Optional client options that define the transport pipeline
+ /// policies for authentication, retries, etc., that are applied to
+ /// every request.
+ ///
+ public DataLakeServiceClient(Uri serviceUri, TokenCredential credential, DataLakeClientOptions options = default)
+ : this(serviceUri, credential.AsPolicy(), options ?? new DataLakeClientOptions())
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the Data Lake service
+ ///
+ ///
+ /// An optional authentication policy used to sign requests.
+ ///
+ ///
+ /// Optional client options that define the transport pipeline
+ /// policies for authentication, retries, etc., that are applied to
+ /// every request.
+ ///
+ internal DataLakeServiceClient(Uri serviceUri, HttpPipelinePolicy authentication, DataLakeClientOptions options)
+ : this(serviceUri, authentication, options, new ClientDiagnostics(options))
+ {
+
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the Data Lake service.
+ ///
+ ///
+ /// An optional authentication policy used to sign requests.
+ ///
+ ///
+ /// Optional client options that define the transport pipeline
+ /// policies for authentication, retries, etc., that are applied to
+ /// every request.
+ ///
+ ///
+ internal DataLakeServiceClient(Uri serviceUri, HttpPipelinePolicy authentication, DataLakeClientOptions options, ClientDiagnostics clientDiagnostics)
+ {
+
+ _pipeline = (options ?? new DataLakeClientOptions()).Build(authentication);
+ _uri = serviceUri;
+ _blobUri = GetBlobUri(serviceUri);
+ _blobServiceClient = new BlobServiceClient(_blobUri, authentication, options);
+ _clientDiagnostics = clientDiagnostics;
+ }
+ #endregion ctors
+
+ ///
+ /// Gets the blob Uri.
+ ///
+ private static Uri GetBlobUri(Uri uri)
+ {
+ Uri blobUri;
+ if (uri.Host.Contains(Constants.DataLake.DfsUriSuffix))
+ {
+ UriBuilder uriBuilder = new UriBuilder(uri);
+ uriBuilder.Host = uriBuilder.Host.Replace(
+ Constants.DataLake.DfsUriSuffix,
+ Constants.DataLake.BlobUriSuffix);
+ blobUri = uriBuilder.Uri;
+ }
+ else
+ {
+ blobUri = uri;
+ }
+ return blobUri;
+ }
+
+ ///
+ /// Create a new object by appending
+ /// to the end of .
+ /// The new uses the same request
+ /// policy pipeline as the .
+ ///
+ ///
+ /// The name of the file system to reference.
+ ///
+ ///
+ /// A for the desired file system.
+ ///
+ public virtual FileSystemClient GetFileSystemClient(string fileSystemName)
+ => new FileSystemClient(Uri.AppendToPath(fileSystemName), Pipeline, ClientDiagnostics);
+
+ #region Get User Delegation Key
+ ///
+ /// The operation retrieves a
+ /// key that can be used to delegate Active Directory authorization to
+ /// shared access signatures created with .
+ ///
+ ///
+ /// Start time for the key's validity, with null indicating an
+ /// immediate start. The time should be specified in UTC.
+ ///
+ ///
+ /// Expiration of the key's validity. The time should be specified
+ /// in UTC.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing
+ /// the user delegation key.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response GetUserDelegationKey(
+ DateTimeOffset? start,
+ DateTimeOffset expiry,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = _blobServiceClient.GetUserDelegationKey(
+ start,
+ expiry,
+ cancellationToken);
+
+ return Response.FromValue(
+ new UserDelegationKey(response.Value),
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation retrieves a
+ /// key that can be used to delegate Active Directory authorization to
+ /// shared access signatures created with .
+ ///
+ ///
+ /// Start time for the key's validity, with null indicating an
+ /// immediate start. The time should be specified in UTC.
+ ///
+ ///
+ /// Expiration of the key's validity. The time should be specified
+ /// in UTC.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing
+ /// the user delegation key.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> GetUserDelegationKeyAsync(
+ DateTimeOffset? start,
+ DateTimeOffset expiry,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = await _blobServiceClient.GetUserDelegationKeyAsync(
+ start,
+ expiry,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ new UserDelegationKey(response.Value),
+ response.GetRawResponse());
+ }
+
+ #endregion Get User Delegation Key
+
+ #region Get File Systems
+ ///
+ /// The operation returns an async
+ /// sequence of file systems in the storage account. Enumerating the
+ /// file systems may make multiple requests to the service while fetching
+ /// all the values. File systems are ordered lexicographically by name.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Specifies trait options for shaping the file systems.
+ ///
+ ///
+ /// Specifies a string that filters the results to return only file systems
+ /// whose name begins with the specified .
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// An of
+ /// describing the file systems in the storage account.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ public virtual Pageable GetFileSystems(
+ FileSystemTraits traits = FileSystemTraits.None,
+ string prefix = default,
+ CancellationToken cancellationToken = default) =>
+ new GetFileSystemsAsyncCollection(_blobServiceClient, traits, prefix).ToSyncCollection(cancellationToken);
+
+ ///
+ /// The operation returns an async
+ /// sequence of file systems in the storage account. Enumerating the
+ /// file systems may make multiple requests to the service while fetching
+ /// all the values. File systems are ordered lexicographically by name.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Specifies trait options for shaping the file systems.
+ ///
+ ///
+ /// Specifies a string that filters the results to return only file systems
+ /// whose name begins with the specified .
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// An describing the
+ /// file systems in the storage account.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ public virtual AsyncPageable GetFileSystemsAsync(
+ FileSystemTraits traits = FileSystemTraits.None,
+ string prefix = default,
+ CancellationToken cancellationToken = default) =>
+ new GetFileSystemsAsyncCollection(_blobServiceClient, traits, prefix).ToAsyncCollection(cancellationToken);
+ #endregion Get File Systems
+
+ #region Create File System
+ ///
+ /// The operation creates a new
+ /// file system under the specified account. If the file system with the
+ /// same name already exists, the operation fails.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// The name of the file system to create.
+ ///
+ ///
+ /// Optionally specifies whether data in the file system may be accessed
+ /// publicly and the level of access.
+ /// specifies full public read access for file system and blob data.
+ /// Clients can enumerate blobs within the file system via anonymous
+ /// request, but cannot enumerate file systems within the storage
+ /// account. specifies public
+ /// read access for blobs. Blob data within this file system can be
+ /// read via anonymous request, but file system data is not available.
+ /// Clients cannot enumerate blobs within the file system via anonymous
+ /// request. specifies that the
+ /// file system data is private to the account owner.
+ ///
+ ///
+ /// Optional custom metadata to set for this file system.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A referencing the
+ /// newly created file system.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response CreateFileSystem(
+ string fileSystemName,
+ PublicAccessType publicAccessType = PublicAccessType.None,
+ Metadata metadata = default,
+ CancellationToken cancellationToken = default)
+ {
+ FileSystemClient fileSystem = GetFileSystemClient(fileSystemName);
+ Response response = fileSystem.Create(publicAccessType, metadata, cancellationToken);
+ return Response.FromValue(fileSystem, response.GetRawResponse());
+ }
+
+ ///
+ /// The operation creates a new
+ /// file system under the specified account. If the file system with the
+ /// same name already exists, the operation fails.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// The name of the file system to create.
+ ///
+ ///
+ /// Optionally specifies whether data in the file system may be accessed
+ /// publicly and the level of access.
+ /// specifies full public read access for file system and blob data.
+ /// Clients can enumerate blobs within the file system via anonymous
+ /// request, but cannot enumerate file systems within the storage
+ /// account. specifies public
+ /// read access for blobs. Blob data within this file system can be
+ /// read via anonymous request, but file system data is not available.
+ /// Clients cannot enumerate blobs within the file system via anonymous
+ /// request. specifies that the
+ /// file system data is private to the account owner.
+ ///
+ ///
+ /// Optional custom metadata to set for this file system.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A referencing the
+ /// newly created file system.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> CreateFileSystemAsync(
+ string fileSystemName,
+ PublicAccessType publicAccessType = PublicAccessType.None,
+ Metadata metadata = default,
+ CancellationToken cancellationToken = default)
+ {
+ FileSystemClient fileSystem = GetFileSystemClient(fileSystemName);
+ Response response = await fileSystem.CreateAsync(publicAccessType, metadata, cancellationToken).ConfigureAwait(false);
+ return Response.FromValue(fileSystem, response.GetRawResponse());
+ }
+ #endregion Create File System
+
+ #region Delete File System
+ ///
+ /// The operation marks the
+ /// specified blob file system for deletion. The file system and any blobs
+ /// contained within it are later deleted during garbage collection.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// The name of the file system to delete.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the deletion of this file system.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A if successful.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response DeleteFileSystem(
+ string fileSystemName,
+ DataLakeRequestConditions accessConditions = default,
+ CancellationToken cancellationToken = default) =>
+ GetFileSystemClient(fileSystemName)
+ .Delete(
+ accessConditions,
+ cancellationToken);
+
+ ///
+ /// The operation marks the
+ /// specified file system for deletion. The file system and any blobs
+ /// contained within it are later deleted during garbage collection.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// The name of the file system to delete.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the deletion of this file system.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A if successful.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task DeleteFileSystemAsync(
+ string fileSystemName,
+ DataLakeRequestConditions accessConditions = default,
+ CancellationToken cancellationToken = default) =>
+ await GetFileSystemClient(fileSystemName)
+ .DeleteAsync(
+ accessConditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+ #endregion Delete File System
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeUriBuilder.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeUriBuilder.cs
new file mode 100644
index 000000000000..4dd931f571ce
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DataLakeUriBuilder.cs
@@ -0,0 +1,307 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Linq;
+using System.Net;
+using System.Text;
+using Azure.Core;
+using Azure.Storage.Sas;
+
+namespace Azure.Storage.Files.DataLake
+{
+ ///
+ /// The class provides a convenient way to
+ /// modify the contents of a instance to point to
+ /// different Azure Data Lake resources like an file system, directory, or file.
+ ///
+ public class DataLakeUriBuilder
+ {
+ ///
+ /// The Uri instance constructed by this builder. It will be reset to
+ /// null when changes are made and reconstructed when
+ /// is accessed.
+ ///
+ private Uri _uri;
+
+ ///
+ /// Gets or sets the scheme name of the URI.
+ /// Example: "https"
+ ///
+ public string Scheme
+ {
+ get => _scheme;
+ set { ResetUri(); _scheme = value; }
+ }
+ private string _scheme;
+
+ ///
+ /// Gets or sets the Domain Name System (DNS) host name or IP address
+ /// of a server.
+ ///
+ /// Example: "account.dfs.core.windows.net"
+ ///
+ public string Host
+ {
+ get => _host;
+ set { ResetUri(); _host = value; }
+ }
+ private string _host;
+
+ ///
+ /// Gets or sets the port number of the URI.
+ ///
+ public int Port
+ {
+ get => _port;
+ set { ResetUri(); _port = value; }
+ }
+ private int _port;
+
+ ///
+ /// Gets or sets the Azure Storage account name. This is only
+ /// populated for IP-style s.
+ ///
+ public string AccountName
+ {
+ get => _accountName;
+ set { ResetUri(); _accountName = value; }
+ }
+ private string _accountName;
+
+ ///
+ /// Gets or sets the name of a file system. The value defaults
+ /// to if not present in the
+ /// .
+ ///
+ ///
+ public string FileSystemName
+ {
+ get => _fileSystemName;
+ set { ResetUri(); _fileSystemName = value; }
+ }
+ private string _fileSystemName;
+
+ ///
+ /// Gets or sets the path of the directory or file. The value defaults
+ /// to if not present in the
+ /// .
+ /// Example: "mydirectory/myfile"
+ ///
+ public string DirectoryOrFilePath
+ {
+ get => _directoryOrFilePath;
+ set { ResetUri(); _directoryOrFilePath = value; }
+ }
+ private string _directoryOrFilePath;
+
+ ///
+ /// Gets or sets the name of a file snapshot. The value defaults to
+ /// if not present in the .
+ ///
+ public string Snapshot
+ {
+ get => _snapshot;
+ set { ResetUri(); _snapshot = value; }
+ }
+ private string _snapshot;
+
+ ///
+ /// Gets or sets the Shared Access Signature query parameters, or null
+ /// if not present in the .
+ ///
+ public SasQueryParameters Sas
+ {
+ get => _sas;
+ set { ResetUri(); _sas = value; }
+ }
+ private SasQueryParameters _sas;
+
+ ///
+ /// Get the last directory or file name from the , or null if
+ /// not present in the .
+ ///
+ internal string LastDirectoryOrFileName =>
+ DirectoryOrFilePath.TrimEnd('/').Split('/').LastOrDefault();
+
+ ///
+ /// Gets or sets any query information included in the URI that's not
+ /// relevant to addressing Azure storage resources.
+ ///
+ public string Query
+ {
+ get => _query;
+ set { ResetUri(); _query = value; }
+ }
+ private string _query;
+
+ ///
+ /// Initializes a new instance of the
+ /// class with the specified .
+ ///
+ ///
+ /// The to a storage resource.
+ ///
+ public DataLakeUriBuilder(Uri uri)
+ {
+ Scheme = uri.Scheme;
+ Host = uri.Host;
+ Port = uri.Port;
+ AccountName = "";
+
+ FileSystemName = "";
+ DirectoryOrFilePath = "";
+
+ Snapshot = "";
+ Sas = null;
+
+ // Find the file system & directory/file path (if any)
+ if (!string.IsNullOrEmpty(uri.AbsolutePath))
+ {
+ // If path starts with a slash, remove it
+
+ var path =
+ (uri.AbsolutePath[0] == '/')
+ ? uri.AbsolutePath.Substring(1)
+ : uri.AbsolutePath;
+
+ var startIndex = 0;
+
+ if (IsHostIPEndPointStyle(uri.Host))
+ {
+ var accountEndIndex = path.IndexOf("/", StringComparison.InvariantCulture);
+
+ // Slash not found; path has account name & no file system name
+ if (accountEndIndex == -1)
+ {
+ AccountName = path;
+ startIndex = path.Length;
+ }
+ else
+ {
+ AccountName = path.Substring(0, accountEndIndex);
+ startIndex = accountEndIndex + 1;
+ }
+ }
+
+ // Find the next slash (if it exists)
+
+ var shareEndIndex = path.IndexOf("/", startIndex, StringComparison.InvariantCulture);
+ if (shareEndIndex == -1)
+ {
+ FileSystemName = path.Substring(startIndex); // Slash not found; path has file system name & no directory/file path
+ }
+ else
+ {
+ FileSystemName = path.Substring(startIndex, shareEndIndex - startIndex); // The file system name is the part between the slashes
+ DirectoryOrFilePath = path.Substring(shareEndIndex + 1); // The directory/file path name is after the file system slash
+ }
+ }
+
+ // Convert the query parameters to a case-sensitive map & trim whitespace
+
+ var paramsMap = new UriQueryParamsCollection(uri.Query);
+
+ if (paramsMap.TryGetValue(Constants.SnapshotParameterName, out var snapshotTime))
+ {
+ Snapshot = snapshotTime;
+
+ // If we recognized the query parameter, remove it from the map
+ paramsMap.Remove(Constants.SnapshotParameterName);
+ }
+
+ if (paramsMap.ContainsKey(Constants.Sas.Parameters.Version))
+ {
+ Sas = new SasQueryParameters(paramsMap);
+ }
+
+ Query = paramsMap.ToString();
+ }
+
+ ///
+ /// Returns the constructed from the
+ /// 's fields. The
+ /// property contains the SAS and additional query parameters.
+ ///
+ public Uri ToUri()
+ {
+ if (_uri == null)
+ {
+ _uri = BuildUri().ToUri();
+ }
+ return _uri;
+ }
+
+ ///
+ /// Returns the display string for the specified
+ /// instance.
+ ///
+ ///
+ /// The display string for the specified
+ /// instance.
+ ///
+ public override string ToString() =>
+ BuildUri().ToString();
+
+ ///
+ /// Reset our cached URI.
+ ///
+ private void ResetUri() =>
+ _uri = null;
+
+ ///
+ /// Construct a representing the
+ /// 's fields. The
+ /// property contains the SAS, snapshot, and additional query parameters.
+ ///
+ /// The constructed .
+ private RequestUriBuilder BuildUri()
+ {
+ // Concatenate account, file system & directory/file path (if they exist)
+ var path = new StringBuilder("");
+ if (!string.IsNullOrWhiteSpace(AccountName))
+ {
+ path.Append("/").Append(AccountName);
+ }
+ if (!string.IsNullOrWhiteSpace(FileSystemName))
+ {
+ path.Append("/").Append(FileSystemName);
+ if (!string.IsNullOrWhiteSpace(DirectoryOrFilePath))
+ {
+ path.Append("/").Append(DirectoryOrFilePath);
+ }
+ }
+
+ // Concatenate query parameters
+ var query = new StringBuilder(Query);
+ if (!string.IsNullOrWhiteSpace(Snapshot))
+ {
+ if (query.Length > 0)
+ { query.Append("&"); }
+ query.Append(Constants.SnapshotParameterName).Append("=").Append(Snapshot);
+ }
+ var sas = Sas?.ToString();
+ if (!string.IsNullOrWhiteSpace(sas))
+ {
+ if (query.Length > 0)
+ { query.Append("&"); }
+ query.Append(sas);
+ }
+
+ // Use RequestUriBuilder, which has slightly nicer formatting
+ return new RequestUriBuilder
+ {
+ Scheme = Scheme,
+ Host = Host,
+ Port = Port,
+ Path = path.ToString(),
+ Query = query.Length > 0 ? "?" + query.ToString() : null
+ };
+ }
+
+ // TODO See remarks at https://docs.microsoft.com/en-us/dotnet/api/system.net.ipaddress.tryparse?view=netframework-4.7.2
+ // TODO refactor to shared method
+ private static bool IsHostIPEndPointStyle(string host)
+ => string.IsNullOrEmpty(host) ? false : IPAddress.TryParse(host, out _);
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/DirectoryClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/DirectoryClient.cs
new file mode 100644
index 000000000000..75c5783a4339
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/DirectoryClient.cs
@@ -0,0 +1,888 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Threading;
+using System.Threading.Tasks;
+using Azure.Core;
+using Azure.Core.Pipeline;
+using Azure.Storage.Files.DataLake.Models;
+using Metadata = System.Collections.Generic.IDictionary;
+
+namespace Azure.Storage.Files.DataLake
+{
+ ///
+ /// A DirectoryClient represents a URI to the Azure DataLake service allowing you to manipulate a directory.
+ ///
+ public class DirectoryClient : PathClient
+ {
+ ///
+ /// The name of the directory.
+ ///
+ private string _name;
+
+ ///
+ /// Gets the name of the directory.
+ ///
+ public virtual string Name
+ {
+ get
+ {
+ SetNameFieldsIfNull();
+ return _name;
+ }
+ }
+
+ #region ctors
+ ///
+ /// Initializes a new instance of the
+ /// class for mocking.
+ ///
+ protected DirectoryClient()
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the directory that includes the
+ /// name of the account, the name of the file system, and the path of the
+ /// directory.
+ ///
+ ///
+ /// Optional that define the transport
+ /// pipeline policies for authentication, retries, etc., that are
+ /// applied to every request.
+ ///
+ public DirectoryClient(Uri directoryUri, DataLakeClientOptions options = default)
+ : this(directoryUri, (HttpPipelinePolicy)null, options)
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the directory that includes the
+ /// name of the account, the name of the file system, and the path of the
+ /// directory.
+ ///
+ ///
+ /// The shared key credential used to sign requests.
+ ///
+ ///
+ /// Optional client options that define the transport pipeline
+ /// policies for authentication, retries, etc., that are applied to
+ /// every request.
+ ///
+ public DirectoryClient(Uri directoryUri, StorageSharedKeyCredential credential, DataLakeClientOptions options = default)
+ : this(directoryUri, credential.AsPolicy(), options)
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the directory that includes the
+ /// name of the account, the name of the file system, and the path of the
+ /// directory.
+ ///
+ ///
+ /// An optional authentication policy used to sign requests.
+ ///
+ ///
+ /// Optional client options that define the transport pipeline
+ /// policies for authentication, retries, etc., that are applied to
+ /// every request.
+ ///
+ internal DirectoryClient(Uri directoryUri, HttpPipelinePolicy authentication, DataLakeClientOptions options)
+ : base(directoryUri, authentication, options)
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the directory that includes the
+ /// name of the account, the name of the file system, and the path of the
+ /// directory.
+ ///
+ ///
+ /// The transport pipeline used to send every request.
+ ///
+ internal DirectoryClient(Uri directoryUri, HttpPipeline pipeline) : base(directoryUri, pipeline)
+ {
+ }
+ #endregion ctors
+
+ ///
+ /// Creates a new object by appending
+ /// to the end of . The
+ /// new uses the same request policy
+ /// pipeline as the .
+ ///
+ /// The name of the file.
+ /// A new instance.
+ public virtual FileClient GetFileClient(string fileName)
+ => new FileClient(Uri.AppendToPath(fileName), Pipeline);
+
+ ///
+ /// Creates a new object by appending
+ /// to the end of .
+ /// The new uses the same request policy
+ /// pipeline as the .
+ ///
+ /// The name of the subdirectory.
+ /// A new instance.
+ public virtual DirectoryClient GetSubDirectoryClient(string subdirectoryName)
+ => new DirectoryClient(Uri.AppendToPath(subdirectoryName), Pipeline);
+
+ ///
+ /// Sets the various name fields if they are currently null.
+ ///
+ protected override void SetNameFieldsIfNull()
+ {
+ base.SetNameFieldsIfNull();
+
+ if (_name == null)
+ {
+ var builder = new DataLakeUriBuilder(Uri);
+ _name = builder.LastDirectoryOrFileName;
+ }
+ }
+
+ #region Create
+ ///
+ /// The operation creates a file or directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the
+ /// new file or directory.
+ ///
+ ///
+ /// Optional custom metadata to set for this file or directory.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access
+ /// permissions for the file owner, the file owning group, and others. Each class may be granted read,
+ /// write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit
+ /// octal notation (e.g. 0766) are supported.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account.
+ /// When creating a file or directory and the parent folder does not have a default ACL,
+ /// the umask restricts the permissions of the file or directory to be created. The resulting
+ /// permission is given by p bitwise-and ^u, where p is the permission and u is the umask. For example,
+ /// if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is
+ /// 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ /// in 4-digit octal notation (e.g. 0766).
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// newly created directory.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response Create(
+ PathHttpHeaders? httpHeaders = default,
+ Metadata metadata = default,
+ string permissions = default,
+ string umask = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ Create(
+ PathResourceType.Directory,
+ httpHeaders,
+ metadata,
+ permissions,
+ umask,
+ conditions,
+ cancellationToken);
+
+ ///
+ /// The operation creates a file or directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the
+ /// new file or directory.
+ ///
+ ///
+ /// Optional custom metadata to set for this file or directory.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access
+ /// permissions for the file owner, the file owning group, and others. Each class may be granted read,
+ /// write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit
+ /// octal notation (e.g. 0766) are supported.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account.
+ /// When creating a file or directory and the parent folder does not have a default ACL,
+ /// the umask restricts the permissions of the file or directory to be created. The resulting
+ /// permission is given by p bitwise-and ^u, where p is the permission and u is the umask. For example,
+ /// if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is
+ /// 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ /// in 4-digit octal notation (e.g. 0766).
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// newly created directory.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> CreateAsync(
+ PathHttpHeaders? httpHeaders = default,
+ Metadata metadata = default,
+ string permissions = default,
+ string umask = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ await CreateAsync(
+ PathResourceType.Directory,
+ httpHeaders,
+ metadata,
+ permissions,
+ umask,
+ conditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+ #endregion Create
+
+ #region Delete
+ ///
+ /// The operation marks the specified path
+ /// for deletion. The path is later deleted during
+ /// garbage collection.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional to add conditions on
+ /// deleting this path.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A on successfully deleting.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response Delete(
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ base.Delete(
+ recursive: true,
+ conditions,
+ cancellationToken);
+
+ ///
+ /// The operation marks the specified path
+ /// for deletion. The path is later deleted during
+ /// garbage collection.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional to add conditions on
+ /// deleting this path.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A on successfully deleting.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task DeleteAsync(
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ await base.DeleteAsync(
+ recursive: true,
+ conditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+ #endregion Delete
+
+ #region Move
+ ///
+ /// The operation renames a directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// The destination path to rename the path to.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the source on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// renamed directory.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public new virtual Response Rename(
+ string destinationPath,
+ DataLakeRequestConditions destConditions = default,
+ DataLakeRequestConditions sourceConditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = base.Rename(
+ destinationPath,
+ destConditions,
+ sourceConditions,
+ cancellationToken);
+
+ return Response.FromValue(
+ new DirectoryClient(response.Value.DfsUri, response.Value.Pipeline),
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation renames a file or directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// The destination path to rename the path to.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the source on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// renamed directory.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public new virtual async Task> RenameAsync(
+ string destinationPath,
+ DataLakeRequestConditions destConditions = default,
+ DataLakeRequestConditions sourceConditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = await base.RenameAsync(
+ destinationPath,
+ destConditions,
+ sourceConditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ new DirectoryClient(response.Value.DfsUri, response.Value.Pipeline),
+ response.GetRawResponse());
+ }
+ #endregion Move
+
+ #region Create File
+ ///
+ /// The operation creates a file in this directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// The name of the file to create.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the
+ /// new file or directory.
+ ///
+ ///
+ /// Optional custom metadata to set for this file or directory.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access
+ /// permissions for the file owner, the file owning group, and others. Each class may be granted read,
+ /// write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit
+ /// octal notation (e.g. 0766) are supported.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account.
+ /// When creating a file or directory and the parent folder does not have a default ACL,
+ /// the umask restricts the permissions of the file or directory to be created. The resulting
+ /// permission is given by p bitwise-and ^u, where p is the permission and u is the umask. For example,
+ /// if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is
+ /// 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ /// in 4-digit octal notation (e.g. 0766).
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// newly created file.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response CreateFile(
+ string fileName,
+ PathHttpHeaders? httpHeaders = default,
+ Metadata metadata = default,
+ string permissions = default,
+ string umask = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ FileClient fileClient = GetFileClient(fileName);
+
+ Response response = fileClient.Create(
+ httpHeaders,
+ metadata,
+ permissions,
+ umask,
+ conditions,
+ cancellationToken);
+
+ return Response.FromValue(
+ fileClient,
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation creates a new file in this directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// Name of the file to create.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the
+ /// new file or directory.
+ ///
+ ///
+ /// Optional custom metadata to set for this file or directory.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access
+ /// permissions for the file owner, the file owning group, and others. Each class may be granted read,
+ /// write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit
+ /// octal notation (e.g. 0766) are supported.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account.
+ /// When creating a file or directory and the parent folder does not have a default ACL,
+ /// the umask restricts the permissions of the file or directory to be created. The resulting
+ /// permission is given by p bitwise-and ^u, where p is the permission and u is the umask. For example,
+ /// if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is
+ /// 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ /// in 4-digit octal notation (e.g. 0766).
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// newly created file.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> CreateFileAsync(
+ string fileName,
+ PathHttpHeaders? httpHeaders = default,
+ Metadata metadata = default,
+ string permissions = default,
+ string umask = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ FileClient fileClient = GetFileClient(fileName);
+
+ Response response = await fileClient.CreateAsync(
+ httpHeaders,
+ metadata,
+ permissions,
+ umask,
+ conditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ fileClient,
+ response.GetRawResponse());
+ }
+ #endregion Create File
+
+ #region Delete File
+ ///
+ /// The operation deletes a file
+ /// in this directory.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// The name of the file to delete.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// deleting this path.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A on successfully deleting.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response DeleteFile(
+ string fileName,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ GetFileClient(fileName).Delete(
+ conditions,
+ cancellationToken);
+
+ ///
+ /// The operation deletes a file
+ /// in this directory.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// The name of the file to delete.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// deleting this path.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A on successfully deleting.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task DeleteFileAsync(
+ string fileName,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ await GetFileClient(fileName).DeleteAsync(
+ conditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+ #endregion Delete File
+
+ #region Create Sub Directory
+ ///
+ /// The operation creates a sub directory in this directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// The path to the directory to create.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the
+ /// new file or directory.
+ ///
+ ///
+ /// Optional custom metadata to set for this file or directory.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access
+ /// permissions for the file owner, the file owning group, and others. Each class may be granted read,
+ /// write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit
+ /// octal notation (e.g. 0766) are supported.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account.
+ /// When creating a file or directory and the parent folder does not have a default ACL,
+ /// the umask restricts the permissions of the file or directory to be created. The resulting
+ /// permission is given by p bitwise-and ^u, where p is the permission and u is the umask. For example,
+ /// if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is
+ /// 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ /// in 4-digit octal notation (e.g. 0766).
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// newly created directory.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response CreateSubDirectory(
+ string path,
+ PathHttpHeaders? httpHeaders = default,
+ Metadata metadata = default,
+ string permissions = default,
+ string umask = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ DirectoryClient directoryClient = GetSubDirectoryClient(path);
+
+ Response response = directoryClient.Create(
+ PathResourceType.Directory,
+ httpHeaders,
+ metadata,
+ permissions,
+ umask,
+ conditions,
+ cancellationToken);
+
+ return Response.FromValue(
+ directoryClient,
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation creates a sub directory in this directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// The path to the directory to create.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the
+ /// new file or directory.
+ ///
+ ///
+ /// Optional custom metadata to set for this file or directory.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access
+ /// permissions for the file owner, the file owning group, and others. Each class may be granted read,
+ /// write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit
+ /// octal notation (e.g. 0766) are supported.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account.
+ /// When creating a file or directory and the parent folder does not have a default ACL,
+ /// the umask restricts the permissions of the file or directory to be created. The resulting
+ /// permission is given by p bitwise-and ^u, where p is the permission and u is the umask. For example,
+ /// if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is
+ /// 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ /// in 4-digit octal notation (e.g. 0766).
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// newly created directory.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> CreateSubDirectoryAsync(
+ string path,
+ PathHttpHeaders? httpHeaders = default,
+ Metadata metadata = default,
+ string permissions = default,
+ string umask = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ DirectoryClient directoryClient = GetSubDirectoryClient(path);
+
+ Response response = await directoryClient.CreateAsync(
+ PathResourceType.Directory,
+ httpHeaders,
+ metadata,
+ permissions,
+ umask,
+ conditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ directoryClient,
+ response.GetRawResponse());
+ }
+ #endregion Create Sub Directory
+
+ #region Delete Sub Directory
+ ///
+ /// The deletes a sub directory in this directory.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// The path to the directory to delete.
+ ///
+ ///
+ /// Optional. When deleting a directory, the number of paths that are deleted with each invocation is limited.
+ /// If the number of paths to be deleted exceeds this limit, a continuation token is returned in this response header.
+ /// When a continuation token is returned in the response, it must be specified in a subsequent invocation of the delete
+ /// operation to continue deleting the directory.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// deleting this path.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A on successfully deleting.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response DeleteSubDirectory(
+ string path,
+ string continuation = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ GetSubDirectoryClient(path).Delete(
+ recursive: true,
+ conditions,
+ cancellationToken);
+
+ ///
+ /// The deletes a sub directory in this directory.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// The path to the directory to delete.
+ ///
+ ///
+ /// Optional. When deleting a directory, the number of paths that are deleted with each invocation is limited.
+ /// If the number of paths to be deleted exceeds this limit, a continuation token is returned in this response header.
+ /// When a continuation token is returned in the response, it must be specified in a subsequent invocation of the delete
+ /// operation to continue deleting the directory.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// deleting this path.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A on successfully deleting.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task DeleteSubDirectoryAsync(
+ string path,
+ string continuation = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ await GetSubDirectoryClient(path).DeleteAsync(
+ recursive: true,
+ conditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+ #endregion Delete Sub Directory
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/ErrorExtensions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/ErrorExtensions.cs
new file mode 100644
index 000000000000..4ba595bfc49e
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/ErrorExtensions.cs
@@ -0,0 +1,37 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Globalization;
+using System.Text;
+using System.Text.Json;
+
+namespace Azure.Storage.Files.DataLake
+{
+ internal static class ErrorExtensions
+ {
+ internal static Exception CreateException(this string jsonMessage, Response response)
+ {
+ if (string.IsNullOrWhiteSpace(jsonMessage))
+ {
+ return new RequestFailedException(
+ status: response.Status,
+ errorCode: response.Status.ToString(CultureInfo.InvariantCulture),
+ message: response.ReasonPhrase,
+ innerException: new Exception());
+ }
+ else
+ {
+ Dictionary> errorDictionary
+ = JsonSerializer.Deserialize>>(jsonMessage);
+ return new RequestFailedException(
+ status: response.Status,
+ errorCode: errorDictionary[Constants.DataLake.ErrorKey][Constants.DataLake.ErrorCodeKey],
+ message: errorDictionary[Constants.DataLake.ErrorKey][Constants.DataLake.ErrorMessageKey],
+ innerException: new Exception());
+ }
+ }
+
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/FileClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/FileClient.cs
new file mode 100644
index 000000000000..4338ea24719e
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/FileClient.cs
@@ -0,0 +1,1080 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.IO;
+using System.Threading;
+using System.Threading.Tasks;
+using Azure.Core;
+using Azure.Core.Pipeline;
+using Azure.Storage.Files.DataLake.Models;
+using Metadata = System.Collections.Generic.IDictionary;
+
+namespace Azure.Storage.Files.DataLake
+{
+ ///
+ /// The allows you to manipulate Azure Data Lake files.
+ ///
+ public class FileClient : PathClient
+ {
+ ///
+ /// The name of the file.
+ ///
+ private string _name;
+
+ ///
+ /// Gets the name of the file.
+ ///
+ public virtual string Name
+ {
+ get
+ {
+ SetNameFieldsIfNull();
+ return _name;
+ }
+ }
+
+ #region ctors
+ ///
+ /// Initializes a new instance of the
+ /// class for mocking.
+ ///
+ protected FileClient()
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ ///
+ /// A referencing the file that includes the
+ /// name of the account, the name of the file system, and the path of the
+ /// file.
+ ///
+ ///
+ /// Optional that define the transport
+ /// pipeline policies for authentication, retries, etc., that are
+ /// applied to every request.
+ ///
+ public FileClient(Uri fileUri, DataLakeClientOptions options = default)
+ : this(fileUri, (HttpPipelinePolicy)null, options)
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ ///
+ /// A referencing the file that includes the
+ /// name of the account, the name of the file system, and the path of the
+ /// file.
+ ///
+ ///
+ /// The shared key credential used to sign requests.
+ ///
+ ///
+ /// Optional that define the transport
+ /// pipeline policies for authentication, retries, etc., that are
+ /// applied to every request.
+ ///
+ public FileClient(Uri fileUri, StorageSharedKeyCredential credential, DataLakeClientOptions options = default)
+ : this(fileUri, credential.AsPolicy(), options)
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the file that includes the
+ /// name of the account, the name of the file system, and the path of the
+ /// file.
+ ///
+ ///
+ /// An optional authentication policy used to sign requests.
+ ///
+ ///
+ /// Optional client options that define the transport pipeline
+ /// policies for authentication, retries, etc., that are applied to
+ /// every request.
+ ///
+ internal FileClient(Uri fileUri, HttpPipelinePolicy authentication, DataLakeClientOptions options)
+ : base(fileUri, authentication, options)
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ ///
+ /// A referencing the file that includes the
+ /// name of the account, the name of the file system, and the path of the file.
+ ///
+ ///
+ /// The transport pipeline used to send every request.
+ ///
+ internal FileClient(Uri fileUri, HttpPipeline pipeline) : base(fileUri, pipeline)
+ {
+ }
+ #endregion ctors
+
+ ///
+ /// Sets the various name fields if they are currently null.
+ ///
+ protected override void SetNameFieldsIfNull()
+ {
+ base.SetNameFieldsIfNull();
+ if (_name == null)
+ {
+ var builder = new DataLakeUriBuilder(Uri);
+ _name = builder.LastDirectoryOrFileName;
+ }
+ }
+
+ #region Create
+ ///
+ /// The operation creates a file or directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the
+ /// new file or directory.
+ ///
+ ///
+ /// Optional custom metadata to set for this file or directory.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access
+ /// permissions for the file owner, the file owning group, and others. Each class may be granted read,
+ /// write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit
+ /// octal notation (e.g. 0766) are supported.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account.
+ /// When creating a file or directory and the parent folder does not have a default ACL,
+ /// the umask restricts the permissions of the file or directory to be created. The resulting
+ /// permission is given by p bitwise-and ^u, where p is the permission and u is the umask. For example,
+ /// if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is
+ /// 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ /// in 4-digit octal notation (e.g. 0766).
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// newly created file.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response Create(
+ PathHttpHeaders? httpHeaders = default,
+ Metadata metadata = default,
+ string permissions = default,
+ string umask = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ Create(
+ PathResourceType.File,
+ httpHeaders,
+ metadata,
+ permissions,
+ umask,
+ conditions,
+ cancellationToken);
+
+ ///
+ /// The operation creates a file or directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the
+ /// new file or directory.
+ ///
+ ///
+ /// Optional custom metadata to set for this file or directory.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access
+ /// permissions for the file owner, the file owning group, and others. Each class may be granted read,
+ /// write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit
+ /// octal notation (e.g. 0766) are supported.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account.
+ /// When creating a file or directory and the parent folder does not have a default ACL,
+ /// the umask restricts the permissions of the file or directory to be created. The resulting
+ /// permission is given by p bitwise-and ^u, where p is the permission and u is the umask. For example,
+ /// if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is
+ /// 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ /// in 4-digit octal notation (e.g. 0766).
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// newly created file.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> CreateAsync(
+ PathHttpHeaders? httpHeaders = default,
+ Metadata metadata = default,
+ string permissions = default,
+ string umask = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ await CreateAsync(
+ PathResourceType.File,
+ httpHeaders,
+ metadata,
+ permissions,
+ umask,
+ conditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+ #endregion Create
+
+ #region Delete
+ ///
+ /// The operation marks the specified path
+ /// for deletion. The path is later deleted during
+ /// garbage collection.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional to add conditions on
+ /// deleting this path.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A on successfully deleting.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response Delete(
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ => base.Delete(
+ recursive: null,
+ conditions,
+ cancellationToken);
+
+ ///
+ /// The operation marks the specified path
+ /// for deletion. The path is later deleted during
+ /// garbage collection.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional to add conditions on
+ /// deleting this path.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A on successfully deleting.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task DeleteAsync(
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ => await base.DeleteAsync(
+ recursive: null,
+ conditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+ #endregion Delete
+
+ #region Move
+ ///
+ /// The operation renames a file.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// The destination path to rename the path to.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the source on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// renamed file.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public new virtual Response Rename(
+ string destinationPath,
+ DataLakeRequestConditions destConditions = default,
+ DataLakeRequestConditions sourceConditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = base.Rename(
+ destinationPath,
+ destConditions,
+ sourceConditions,
+ cancellationToken);
+
+ return Response.FromValue(
+ new FileClient(response.Value.DfsUri, response.Value.Pipeline),
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation renames a file or directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// The destination path to rename the path to.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the source on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// renamed file.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public new virtual async Task> RenameAsync(
+ string destinationPath,
+ DataLakeRequestConditions destConditions = default,
+ DataLakeRequestConditions sourceConditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = await base.RenameAsync(
+ destinationPath,
+ destConditions,
+ sourceConditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ new FileClient(response.Value.DfsUri, response.Value.Pipeline),
+ response.GetRawResponse());
+ }
+ #endregion Move
+
+ #region Append Data
+ ///
+ /// The operation uploads data to be appended to a file. Data can only be appended to a file.
+ /// To apply previously uploaded data to a file, call Flush Data.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// A containing the content to upload.
+ ///
+ ///
+ /// This parameter allows the caller to upload data in parallel and control the order in which it is appended to the file.
+ /// It is required when uploading data to be appended to the file and when flushing previously uploaded data to the file.
+ /// The value must be the position where the data is to be appended. Uploaded data is not immediately flushed, or written, to the file.
+ /// To flush, the previously uploaded data must be contiguous, the position parameter must be specified and equal to the length
+ /// of the file after all data has been written, and there must not be a request entity body included with the request.
+ ///
+ ///
+ /// This hash is used to verify the integrity of the request content during transport. When this header is specified,
+ /// the storage service compares the hash of the content that has arrived with this header value. If the two hashes do not match,
+ /// the operation will fail with error code 400 (Bad Request). Note that this MD5 hash is not stored with the file. This header is
+ /// associated with the request content, and not with the stored content of the file itself.
+ ///
+ ///
+ /// Optional lease id.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the state
+ /// of the updated file.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response Append(
+ Stream content,
+ long offset,
+ byte[] contentHash = default,
+ string leaseId = default,
+ CancellationToken cancellationToken = default) =>
+ AppendInternal(
+ content,
+ offset,
+ contentHash,
+ leaseId,
+ async: false,
+ cancellationToken)
+ .EnsureCompleted();
+
+ ///
+ /// The operation uploads data to be appended to a file. Data can only be appended to a file.
+ /// To apply previously uploaded data to a file, call Flush Data.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// A containing the content to upload.
+ ///
+ ///
+ /// This parameter allows the caller to upload data in parallel and control the order in which it is appended to the file.
+ /// It is required when uploading data to be appended to the file and when flushing previously uploaded data to the file.
+ /// The value must be the position where the data is to be appended. Uploaded data is not immediately flushed, or written, to the file.
+ /// To flush, the previously uploaded data must be contiguous, the position parameter must be specified and equal to the length
+ /// of the file after all data has been written, and there must not be a request entity body included with the request.
+ ///
+ ///
+ /// This hash is used to verify the integrity of the request content during transport. When this header is specified,
+ /// the storage service compares the hash of the content that has arrived with this header value. If the two hashes do not match,
+ /// the operation will fail with error code 400 (Bad Request). Note that this MD5 hash is not stored with the file. This header is
+ /// associated with the request content, and not with the stored content of the file itself.
+ ///
+ ///
+ /// Optional lease id.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the state
+ /// of the updated file.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task AppendAsync(
+ Stream content,
+ long offset,
+ byte[] contentHash = default,
+ string leaseId = default,
+ CancellationToken cancellationToken = default) =>
+ await AppendInternal(
+ content,
+ offset,
+ contentHash,
+ leaseId,
+ async: true,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ ///
+ /// The operation uploads data to be appended to a file. Data can only be appended to a file.
+ /// To apply previously uploaded data to a file, call Flush Data.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// A containing the content to upload.
+ ///
+ ///
+ /// This parameter allows the caller to upload data in parallel and control the order in which it is appended to the file.
+ /// It is required when uploading data to be appended to the file and when flushing previously uploaded data to the file.
+ /// The value must be the position where the data is to be appended. Uploaded data is not immediately flushed, or written, to the file.
+ /// To flush, the previously uploaded data must be contiguous, the position parameter must be specified and equal to the length
+ /// of the file after all data has been written, and there must not be a request entity body included with the request.
+ ///
+ ///
+ /// This hash is used to verify the integrity of the request content during transport. When this header is specified,
+ /// the storage service compares the hash of the content that has arrived with this header value. If the two hashes do not match,
+ /// the operation will fail with error code 400 (Bad Request). Note that this MD5 hash is not stored with the file. This header is
+ /// associated with the request content, and not with the stored content of the file itself.
+ ///
+ ///
+ /// Optional lease id.
+ ///
+ ///
+ /// Whether to invoke the operation asynchronously.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the state
+ /// of the updated file.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ private async Task AppendInternal(
+ Stream content,
+ long? offset,
+ byte[] contentHash,
+ string leaseId,
+ bool async,
+ CancellationToken cancellationToken)
+ {
+ using (Pipeline.BeginLoggingScope(nameof(FileClient)))
+ {
+ Pipeline.LogMethodEnter(
+ nameof(FileClient),
+ message:
+ $"{nameof(Uri)}: {Uri}\n" +
+ $"{nameof(offset)}: {offset}\n" +
+ $"{nameof(leaseId)}: {leaseId}\n");
+ try
+ {
+ Response response = await DataLakeRestClient.Path.AppendDataAsync(
+ clientDiagnostics: ClientDiagnostics,
+ pipeline: Pipeline,
+ resourceUri: DfsUri,
+ body: content,
+ position: offset,
+ contentLength: content.Length,
+ transactionalContentHash: contentHash,
+ leaseId: leaseId,
+ async: async,
+ cancellationToken: cancellationToken)
+ .ConfigureAwait(false);
+
+ return response.GetRawResponse();
+ }
+ catch (Exception ex)
+ {
+ Pipeline.LogException(ex);
+ throw;
+ }
+ finally
+ {
+ Pipeline.LogMethodExit(nameof(FileClient));
+ }
+ }
+ }
+ #endregion Append Data
+
+ #region Flush Data
+ ///
+ /// The operation flushes (writes) previously
+ /// appended data to a file.
+ ///
+ ///
+ /// This parameter allows the caller to upload data in parallel and control the order in which it is appended to the file.
+ /// It is required when uploading data to be appended to the file and when flushing previously uploaded data to the file.
+ /// The value must be the position where the data is to be appended. Uploaded data is not immediately flushed, or written,
+ /// to the file. To flush, the previously uploaded data must be contiguous, the position parameter must be specified and
+ /// equal to the length of the file after all data has been written, and there must not be a request entity body included
+ /// with the request.
+ ///
+ ///
+ /// If "true", uncommitted data is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+ /// after the flush operation. The default is false. Data at offsets less than the specified position are written to the
+ /// file when flush succeeds, but this optional parameter allows data after the flush position to be retained for a future
+ /// flush operation.
+ ///
+ ///
+ /// Azure Storage Events allow applications to receive notifications when files change. When Azure Storage Events are enabled,
+ /// a file changed event is raised. This event has a property indicating whether this is the final change to distinguish the
+ /// difference between an intermediate flush to a file stream and the final close of a file stream. The close query parameter
+ /// is valid only when the action is "flush" and change notifications are enabled. If the value of close is "true" and the
+ /// flush operation completes successfully, the service raises a file change notification with a property indicating that
+ /// this is the final update (the file stream has been closed). If "false" a change notification is raised indicating the
+ /// file has changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to indicate that
+ /// the file stream has been closed.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the file.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the flush of this file.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// path.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response Flush(
+ long position,
+ bool? retainUncommittedData = default,
+ bool? close = default,
+ PathHttpHeaders? httpHeaders = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ FlushInternal(
+ position,
+ retainUncommittedData,
+ close,
+ httpHeaders,
+ conditions,
+ async: false,
+ cancellationToken)
+ .EnsureCompleted();
+
+ ///
+ /// The operation flushes (writes) previously
+ /// appended data to a file.
+ ///
+ ///
+ /// This parameter allows the caller to upload data in parallel and control the order in which it is appended to the file.
+ /// It is required when uploading data to be appended to the file and when flushing previously uploaded data to the file.
+ /// The value must be the position where the data is to be appended. Uploaded data is not immediately flushed, or written,
+ /// to the file. To flush, the previously uploaded data must be contiguous, the position parameter must be specified and
+ /// equal to the length of the file after all data has been written, and there must not be a request entity body included
+ /// with the request.
+ ///
+ ///
+ /// If "true", uncommitted data is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+ /// after the flush operation. The default is false. Data at offsets less than the specified position are written to the
+ /// file when flush succeeds, but this optional parameter allows data after the flush position to be retained for a future
+ /// flush operation.
+ ///
+ ///
+ /// Azure Storage Events allow applications to receive notifications when files change. When Azure Storage Events are enabled,
+ /// a file changed event is raised. This event has a property indicating whether this is the final change to distinguish the
+ /// difference between an intermediate flush to a file stream and the final close of a file stream. The close query parameter
+ /// is valid only when the action is "flush" and change notifications are enabled. If the value of close is "true" and the
+ /// flush operation completes successfully, the service raises a file change notification with a property indicating that
+ /// this is the final update (the file stream has been closed). If "false" a change notification is raised indicating the
+ /// file has changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to indicate that
+ /// the file stream has been closed.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the file.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the flush of this file.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// path.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> FlushAsync(
+ long position,
+ bool? retainUncommittedData = default,
+ bool? close = default,
+ PathHttpHeaders? httpHeaders = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ await FlushInternal(
+ position,
+ retainUncommittedData,
+ close,
+ httpHeaders,
+ conditions,
+ async: true,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ ///
+ /// The operation flushes (writes) previously
+ /// appended data to a file.
+ ///
+ ///
+ /// This parameter allows the caller to upload data in parallel and control the order in which it is appended to the file.
+ /// It is required when uploading data to be appended to the file and when flushing previously uploaded data to the file.
+ /// The value must be the position where the data is to be appended. Uploaded data is not immediately flushed, or written,
+ /// to the file. To flush, the previously uploaded data must be contiguous, the position parameter must be specified and
+ /// equal to the length of the file after all data has been written, and there must not be a request entity body included
+ /// with the request.
+ ///
+ ///
+ /// If "true", uncommitted data is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+ /// after the flush operation. The default is false. Data at offsets less than the specified position are written to the
+ /// file when flush succeeds, but this optional parameter allows data after the flush position to be retained for a future
+ /// flush operation.
+ ///
+ ///
+ /// Azure Storage Events allow applications to receive notifications when files change. When Azure Storage Events are enabled,
+ /// a file changed event is raised. This event has a property indicating whether this is the final change to distinguish the
+ /// difference between an intermediate flush to a file stream and the final close of a file stream. The close query parameter
+ /// is valid only when the action is "flush" and change notifications are enabled. If the value of close is "true" and the
+ /// flush operation completes successfully, the service raises a file change notification with a property indicating that
+ /// this is the final update (the file stream has been closed). If "false" a change notification is raised indicating the
+ /// file has changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to indicate that
+ /// the file stream has been closed.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the file.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the flush of this file.
+ ///
+ ///
+ /// Whether to invoke the operation asynchronously.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// path.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ private async Task> FlushInternal(
+ long position,
+ bool? retainUncommittedData,
+ bool? close,
+ PathHttpHeaders? httpHeaders,
+ DataLakeRequestConditions conditions,
+ bool async,
+ CancellationToken cancellationToken)
+ {
+ using (Pipeline.BeginLoggingScope(nameof(FileClient)))
+ {
+ Pipeline.LogMethodEnter(
+ nameof(FileClient),
+ message:
+ $"{nameof(Uri)}: {Uri}");
+
+ try
+ {
+ Response response = await DataLakeRestClient.Path.FlushDataAsync(
+ clientDiagnostics: ClientDiagnostics,
+ pipeline: Pipeline,
+ resourceUri: DfsUri,
+ position: position,
+ retainUncommittedData: retainUncommittedData,
+ close: close,
+ contentLength: 0,
+ contentHash: httpHeaders?.ContentHash,
+ leaseId: conditions?.LeaseId,
+ cacheControl: httpHeaders?.CacheControl,
+ contentType: httpHeaders?.ContentType,
+ contentDisposition: httpHeaders?.ContentDisposition,
+ contentEncoding: httpHeaders?.ContentEncoding,
+ contentLanguage: httpHeaders?.ContentLanguage,
+ ifMatch: conditions?.IfMatch,
+ ifNoneMatch: conditions?.IfNoneMatch,
+ ifModifiedSince: conditions?.IfModifiedSince,
+ ifUnmodifiedSince: conditions?.IfUnmodifiedSince,
+ async: async,
+ cancellationToken: cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ new PathInfo()
+ {
+ ETag = response.Value.ETag,
+ LastModified = response.Value.LastModified
+ },
+ response.GetRawResponse());
+ }
+ catch (Exception ex)
+ {
+ Pipeline.LogException(ex);
+ throw;
+ }
+ finally
+ {
+ Pipeline.LogMethodExit(nameof(FileClient));
+ }
+ }
+ }
+ #endregion
+
+ #region Read Data
+ ///
+ /// The operation downloads a file from
+ /// the service, including its metadata and properties.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// A describing the
+ /// downloaded file. contains
+ /// the file's data.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+#pragma warning disable AZC0002 // Client method should have cancellationToken as the last optional parameter
+ [ForwardsClientCalls]
+ public virtual Response Read()
+ {
+ Response response = _blockBlobClient.Download();
+
+ return Response.FromValue(
+ response.Value.ToFileDownloadInfo(),
+ response.GetRawResponse());
+ }
+#pragma warning restore AZC0002 // Client method should have cancellationToken as the last optional parameter
+
+ ///
+ /// The operation downloads a file from
+ /// the service, including its metadata and properties.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// A describing the
+ /// downloaded file. contains
+ /// the file's data.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+#pragma warning disable AZC0002 // Client method should have cancellationToken as the last optional parameter
+ [ForwardsClientCalls]
+ public virtual async Task> ReadAsync()
+ {
+ Response response
+ = await _blockBlobClient.DownloadAsync(CancellationToken.None).ConfigureAwait(false);
+
+ return Response.FromValue(
+ response.Value.ToFileDownloadInfo(),
+ response.GetRawResponse());
+ }
+#pragma warning restore AZC0002 // Client method should have cancellationToken as the last optional parameter
+
+ ///
+ /// The operation downloads a file from
+ /// the service, including its metadata and properties.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// downloaded file. contains
+ /// the file's data.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response Read(
+ CancellationToken cancellationToken = default)
+ {
+ Response response = _blockBlobClient.Download(cancellationToken);
+
+ return Response.FromValue(
+ response.Value.ToFileDownloadInfo(),
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation downloads a file from
+ /// the service, including its metadata and properties.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// downloaded file. contains
+ /// the file's data.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> ReadAsync(
+ CancellationToken cancellationToken = default)
+ {
+ Response response
+ = await _blockBlobClient.DownloadAsync(cancellationToken).ConfigureAwait(false);
+
+ return Response.FromValue(
+ response.Value.ToFileDownloadInfo(),
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The
+ /// operation downloads a file from the service, including its metadata
+ /// and properties.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// If provided, only download the bytes of the file in the specified
+ /// range. If not provided, download the entire file.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// downloading this file.
+ ///
+ ///
+ /// When set to true and specified together with the ,
+ /// the service returns the MD5 hash for the range, as long as the
+ /// range is less than or equal to 4 MB in size. If this value is
+ /// specified without or set to true when the
+ /// range exceeds 4 MB in size, a
+ /// is thrown.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// downloaded file. contains
+ /// the file's data.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response Read(
+ HttpRange range = default,
+ DataLakeRequestConditions conditions = default,
+ bool rangeGetContentHash = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = _blockBlobClient.Download(
+ range: range,
+ conditions: conditions,
+ rangeGetContentHash: rangeGetContentHash,
+ cancellationToken: cancellationToken);
+
+ return Response.FromValue(
+ response.Value.ToFileDownloadInfo(),
+ response.GetRawResponse());
+ }
+ ///
+ /// The
+ /// operation downloads a file from the service, including its metadata
+ /// and properties.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// If provided, only download the bytes of the file in the specified
+ /// range. If not provided, download the entire file.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// downloading this file.
+ ///
+ ///
+ /// When set to true and specified together with the ,
+ /// the service returns the MD5 hash for the range, as long as the
+ /// range is less than or equal to 4 MB in size. If this value is
+ /// specified without or set to true when the
+ /// range exceeds 4 MB in size, a
+ /// is thrown.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// downloaded file. contains
+ /// the file's data.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> ReadAsync(
+ HttpRange range = default,
+ DataLakeRequestConditions conditions = default,
+ bool rangeGetContentHash = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = await _blockBlobClient.DownloadAsync(
+ range: range,
+ conditions: conditions,
+ rangeGetContentHash: rangeGetContentHash,
+ cancellationToken: cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ response.Value.ToFileDownloadInfo(),
+ response.GetRawResponse());
+ }
+ #endregion Read Data
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/FileSystemClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/FileSystemClient.cs
new file mode 100644
index 000000000000..452ebd3a5e89
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/FileSystemClient.cs
@@ -0,0 +1,1241 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Threading;
+using System.Threading.Tasks;
+using System.Linq;
+using Azure.Core;
+using Azure.Core.Pipeline;
+using Azure.Storage.Blobs;
+using Azure.Storage.Blobs.Models;
+using Azure.Storage.Files.DataLake.Models;
+using Metadata = System.Collections.Generic.IDictionary;
+using System.Text.Json;
+using System.Collections.Generic;
+
+namespace Azure.Storage.Files.DataLake
+{
+ ///
+ /// The allows you to manipulate Azure
+ /// Data Lake file systems and their directories and files.
+ ///
+ public class FileSystemClient
+ {
+ ///
+ /// A associated with the file system.
+ ///
+ internal readonly BlobContainerClient _containerClient;
+
+ ///
+ /// ContainerClient
+ ///
+ internal virtual BlobContainerClient ContainerClient => _containerClient;
+
+ ///
+ /// The file system's user-provided endpoint.
+ ///
+ private readonly Uri _uri;
+
+ ///
+ /// The file system's blob endpoint.
+ ///
+ private readonly Uri _blobUri;
+
+ ///
+ /// The file system's dfs endpoint.
+ ///
+ private readonly Uri _dfsUri;
+
+ ///
+ /// Gets the file system's primary endpoint.
+ ///
+ public virtual Uri Uri => _uri;
+
+ ///
+ /// The transport pipeline used to send
+ /// every request.
+ ///
+ private readonly HttpPipeline _pipeline;
+
+ ///
+ /// Gets the transport pipeline used to send
+ /// every request.
+ ///
+ protected virtual HttpPipeline Pipeline => _pipeline;
+
+ ///
+ /// The Storage account name corresponding to the file system client.
+ ///
+ private string _accountName;
+
+ ///
+ /// Gets the Storage account name corresponding to the file system client.
+ ///
+ public virtual string AccountName
+ {
+ get
+ {
+ SetNameFieldsIfNull();
+ return _accountName;
+ }
+ }
+
+ ///
+ /// The name of the file system.
+ ///
+ private string _name;
+
+ ///
+ /// Gets the name of the file system.
+ ///
+ public virtual string Name
+ {
+ get
+ {
+ SetNameFieldsIfNull();
+ return _name;
+ }
+ }
+
+ ///
+ /// The instance used to create diagnostic scopes
+ /// every request.
+ ///
+ private readonly ClientDiagnostics _clientDiagnostics;
+
+ ///
+ /// The instance used to create diagnostic scopes
+ /// every request.
+ ///
+ internal virtual ClientDiagnostics ClientDiagnostics => _clientDiagnostics;
+
+ #region ctors
+ ///
+ /// Initializes a new instance of the
+ /// class for mocking.
+ ///
+ protected FileSystemClient()
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the share that includes the
+ /// name of the account and the name of the file system.
+ ///
+ ///
+ /// Optional client options that define the transport pipeline
+ /// policies for authentication, retries, etc., that are applied to
+ /// every request.
+ ///
+ public FileSystemClient(Uri fileSystemUri, DataLakeClientOptions options = default)
+ : this(fileSystemUri, (HttpPipelinePolicy)null, options)
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the file system that includes the
+ /// name of the account and the name of the file system.
+ ///
+ ///
+ /// The shared key credential used to sign requests.
+ ///
+ ///
+ /// Optional client options that define the transport pipeline
+ /// policies for authentication, retries, etc., that are applied to
+ /// every request.
+ ///
+ public FileSystemClient(Uri fileSystemUri, StorageSharedKeyCredential credential, DataLakeClientOptions options = default)
+ : this(fileSystemUri, credential.AsPolicy(), options)
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the file system that includes the
+ /// name of the account and the name of the file system.
+ ///
+ ///
+ /// The token credential used to sign requests.
+ ///
+ ///
+ /// Optional client options that define the transport pipeline
+ /// policies for authentication, retries, etc., that are applied to
+ /// every request.
+ ///
+ public FileSystemClient(Uri fileSystemUri, TokenCredential credential, DataLakeClientOptions options = default)
+ : this(fileSystemUri, credential.AsPolicy(), options)
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the file system that includes the
+ /// name of the account and the name of the file system.
+ ///
+ ///
+ /// An optional authentication policy used to sign requests.
+ ///
+ ///
+ /// Optional client options that define the transport pipeline
+ /// policies for authentication, retries, etc., that are applied to
+ /// every request.
+ ///
+ internal FileSystemClient(Uri fileSystemUri, HttpPipelinePolicy authentication, DataLakeClientOptions options)
+ {
+ _uri = fileSystemUri;
+ _blobUri = GetBlobUri(fileSystemUri);
+ _dfsUri = GetDfsUri(fileSystemUri);
+ _pipeline = options.Build(authentication);
+ _clientDiagnostics = new ClientDiagnostics(options);
+ _containerClient = new BlobContainerClient(_blobUri, _pipeline, _clientDiagnostics, null);
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the file system that includes the
+ /// name of the account and the name of the file system.
+ ///
+ ///
+ /// The transport pipeline used to send every request.
+ ///
+ /// ///
+ internal FileSystemClient(Uri fileSystemUri, HttpPipeline pipeline, ClientDiagnostics clientDiagnostics)
+ {
+ _uri = fileSystemUri;
+ _blobUri = GetBlobUri(fileSystemUri);
+ _dfsUri = GetDfsUri(fileSystemUri);
+ _pipeline = pipeline;
+ _clientDiagnostics = clientDiagnostics;
+ _containerClient = new BlobContainerClient(_blobUri, pipeline, clientDiagnostics, null);
+ }
+ #endregion ctors
+
+ ///
+ /// Create a new object by appending
+ /// to the end of . The
+ /// new uses the same request policy
+ /// pipeline as the .
+ ///
+ /// The name of the directory.
+ /// A new instance.
+ public virtual DirectoryClient GetDirectoryClient(string directoryName)
+ => new DirectoryClient(Uri.AppendToPath(directoryName), Pipeline);
+
+ ///
+ /// Create a new object by appending
+ /// to the end of . The
+ /// new uses the same request policy
+ /// pipeline as the .
+ ///
+ /// The name of the file.
+ /// A new instance.
+ public virtual FileClient GetFileClient(string fileName)
+ => new FileClient(Uri.AppendToPath(fileName), Pipeline);
+
+ ///
+ /// Gets the blob Uri.
+ ///
+ private static Uri GetBlobUri(Uri uri)
+ {
+ Uri blobUri;
+ if (uri.Host.Contains(Constants.DataLake.DfsUriSuffix))
+ {
+ UriBuilder uriBuilder = new UriBuilder(uri);
+ uriBuilder.Host = uriBuilder.Host.Replace(
+ Constants.DataLake.DfsUriSuffix,
+ Constants.DataLake.BlobUriSuffix);
+ blobUri = uriBuilder.Uri;
+ }
+ else
+ {
+ blobUri = uri;
+ }
+ return blobUri;
+ }
+
+ ///
+ /// Gets the dfs Uri.
+ ///
+ private static Uri GetDfsUri(Uri uri)
+ {
+ Uri dfsUri;
+ if (uri.Host.Contains(Constants.DataLake.BlobUriSuffix))
+ {
+ UriBuilder uriBuilder = new UriBuilder(uri);
+ uriBuilder.Host = uriBuilder.Host.Replace(
+ Constants.DataLake.BlobUriSuffix,
+ Constants.DataLake.DfsUriSuffix);
+ dfsUri = uriBuilder.Uri;
+ }
+ else
+ {
+ dfsUri = uri;
+ }
+ return dfsUri;
+ }
+
+ ///
+ /// Sets the various name fields if they are currently null.
+ ///
+ private void SetNameFieldsIfNull()
+ {
+ if (_name == null || _accountName == null)
+ {
+ var builder = new DataLakeUriBuilder(Uri);
+ _name = builder.FileSystemName;
+ _accountName = builder.AccountName;
+ }
+ }
+
+ #region Create
+ ///
+ /// The operation creates a new file system
+ /// under the specified account. If the file system with the same name
+ /// already exists, the operation fails.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optionally specifies whether data in the file system may be accessed
+ /// publicly and the level of access.
+ /// specifies full public read access for file system and blob data.
+ /// Clients can enumerate blobs within the file system via anonymous
+ /// request, but cannot enumerate file systems within the storage
+ /// account. specifies public
+ /// read access for blobs. Blob data within this file system can be
+ /// read via anonymous request, but file system data is not available.
+ /// Clients cannot enumerate blobs within the file system via anonymous
+ /// request. specifies that the
+ /// file system data is private to the account owner.
+ ///
+ ///
+ /// Optional custom metadata to set for this file system.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the newly
+ /// created file system.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response Create(
+ Models.PublicAccessType publicAccessType = Models.PublicAccessType.None,
+ Metadata metadata = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response containerResponse = _containerClient.Create(
+ (Blobs.Models.PublicAccessType)publicAccessType,
+ metadata,
+ cancellationToken);
+
+ return Response.FromValue(
+ new FileSystemInfo()
+ {
+ ETag = containerResponse.Value.ETag,
+ LastModified = containerResponse.Value.LastModified
+ },
+ containerResponse.GetRawResponse());
+ }
+
+ ///
+ /// The operation creates a new file system
+ /// under the specified account. If the file system with the same name
+ /// already exists, the operation fails.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optionally specifies whether data in the file system may be accessed
+ /// publicly and the level of access.
+ /// specifies full public read access for file system and blob data.
+ /// Clients can enumerate blobs within the file system via anonymous
+ /// request, but cannot enumerate file systems within the storage
+ /// account. specifies public
+ /// read access for blobs. Blob data within this file system can be
+ /// read via anonymous request, but file system data is not available.
+ /// Clients cannot enumerate blobs within the file system via anonymous
+ /// request. specifies that the
+ /// file system data is private to the account owner.
+ ///
+ ///
+ /// Optional custom metadata to set for this file system.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the newly
+ /// created file system.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> CreateAsync(
+ Models.PublicAccessType publicAccessType = Models.PublicAccessType.None,
+ Metadata metadata = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response containerResponse = await _containerClient.CreateAsync(
+ (Blobs.Models.PublicAccessType)publicAccessType,
+ metadata,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ new FileSystemInfo()
+ {
+ ETag = containerResponse.Value.ETag,
+ LastModified = containerResponse.Value.LastModified
+ },
+ containerResponse.GetRawResponse());
+ }
+ #endregion Create
+
+ #region Delete
+ ///
+ /// The operation marks the specified
+ /// file system for deletion. The file system and any paths contained
+ /// within it are later deleted during garbage collection.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the deletion of this file system.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A if successful.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response Delete(
+ DataLakeRequestConditions accessConditions = default,
+ CancellationToken cancellationToken = default) =>
+ _containerClient.Delete(
+ accessConditions,
+ cancellationToken);
+
+ ///
+ /// The operation marks the specified
+ /// file system for deletion. The file system and any paths contained
+ /// within it are later deleted during garbage collection.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the deletion of this file system.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A if successful.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task DeleteAsync(
+ DataLakeRequestConditions accessConditions = default,
+ CancellationToken cancellationToken = default) =>
+ await _containerClient.DeleteAsync(
+ accessConditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+ #endregion Delete
+
+ #region GetProperties
+ ///
+ /// The operation returns all
+ /// user-defined metadata and system properties for the specified
+ /// file system. The data returned does not include the file system's
+ /// list of paths.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional to add
+ /// conditions on getting the file system's properties.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// file system and its properties.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response GetProperties(
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response containerResponse = _containerClient.GetProperties(
+ conditions,
+ cancellationToken);
+
+ return Response.FromValue(
+ containerResponse.Value.ToFileSystemItem(),
+ containerResponse.GetRawResponse());
+ }
+
+ ///
+ /// The operation returns all
+ /// user-defined metadata and system properties for the specified
+ /// file system. The data returned does not include the file system's
+ /// list of paths.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional to add
+ /// conditions on getting the file system's properties.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// file system and its properties.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> GetPropertiesAsync(
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = await _containerClient.GetPropertiesAsync(
+ conditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ response.Value.ToFileSystemItem(),
+ response.GetRawResponse());
+ }
+ #endregion GetProperties
+
+ #region SetMetadata
+ ///
+ /// The operation sets one or more
+ /// user-defined name-value pairs for the specified file system.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Custom metadata to set for this file system.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on setting the file system's metadata.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A if successful.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response SetMetadata(
+ Metadata metadata,
+ DataLakeRequestConditions accessConditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = _containerClient.SetMetadata(
+ metadata,
+ accessConditions,
+ cancellationToken);
+
+ return Response.FromValue(
+ new FileSystemInfo()
+ {
+ ETag = response.Value.ETag,
+ LastModified = response.Value.LastModified
+ },
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation sets one or more
+ /// user-defined name-value pairs for the specified file system.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Custom metadata to set for this file system.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on setting the file system's metadata.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A if successful.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> SetMetadataAsync(
+ Metadata metadata,
+ DataLakeRequestConditions accessConditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = await _containerClient.SetMetadataAsync(
+ metadata,
+ accessConditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ new FileSystemInfo()
+ {
+ ETag = response.Value.ETag,
+ LastModified = response.Value.LastModified
+ },
+ response.GetRawResponse());
+ }
+ #endregion SetMetadata
+
+ #region List Paths
+ ///
+ /// The operation returns a sequence
+ /// of paths in this file system. Enumerating the paths may make
+ /// multiple requests to the service while fetching all the values.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Specifies options for listing and filtering the paths.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// An of
+ /// describing the paths in the file system.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ public virtual Pageable ListPaths(
+ GetPathsOptions? options = default,
+ CancellationToken cancellationToken = default) =>
+ new GetPathsAsyncCollection(this, options).ToSyncCollection(cancellationToken);
+
+ ///
+ /// The operation returns an async
+ /// sequence of paths in this file system. Enumerating the paths may
+ /// make multiple requests to the service while fetching all the
+ /// values.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Specifies options for listing and filtering the paths.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// An describing the
+ /// paths in the file system.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ public virtual AsyncPageable ListPathsAsync(
+ GetPathsOptions? options = default,
+ CancellationToken cancellationToken = default) =>
+ new GetPathsAsyncCollection(this, options).ToAsyncCollection(cancellationToken);
+
+ ///
+ /// The operation returns a
+ /// single segment of paths in this file system, starting
+ /// from the specified . Use an empty
+ /// to start enumeration from the beginning
+ /// and the if it's not
+ /// empty to make subsequent calls to
+ /// to continue enumerating the paths segment by segment. Paths are
+ /// ordered lexicographically by name.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Specifies options for listing and filtering paths.
+ ///
+ ///
+ /// The number of paths returned with each invocation is limited. If the number of paths
+ /// to be returned exceeds this limit, a continuation token is returned in the response header
+ /// x-ms-continuation. When a continuation token is returned in the response, it must be specified
+ /// in a subsequent invocation of the list operation to continue listing the paths.
+ ///
+ ///
+ /// An optional value that specifies the maximum number of items to return. If omitted or greater than 5,000,
+ /// the response will include up to 5,000 items.
+ ///
+ ///
+ /// Whether to invoke the operation asynchronously.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing a
+ /// segment of the paths in the file system.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ internal async Task> ListPathsInternal(
+ GetPathsOptions? options,
+ string continuation,
+ int? maxResults,
+ bool async,
+ CancellationToken cancellationToken)
+ {
+ using (Pipeline.BeginLoggingScope(nameof(FileSystemClient)))
+ {
+ Pipeline.LogMethodEnter(
+ nameof(FileSystemClient),
+ message:
+ $"{nameof(Uri)}: {Uri}\n" +
+ $"{nameof(continuation)}: {continuation}\n" +
+ $"{nameof(maxResults)}: {maxResults})");
+ try
+ {
+ Response response = await DataLakeRestClient.FileSystem.ListPathsAsync(
+ clientDiagnostics: _clientDiagnostics,
+ pipeline: Pipeline,
+ resourceUri: _dfsUri,
+ continuation: continuation,
+ recursive: options?.Recursive ?? false,
+ maxResults: maxResults,
+ upn: options?.Upn,
+ path: options?.Path,
+ async: async,
+ cancellationToken: cancellationToken)
+ .ConfigureAwait(false);
+
+ // The paths listing comes back as a JSON body rather than XML,
+ // so read it out of the stream and deserialize it manually.
+ string jsonString;
+ using (var reader = new System.IO.StreamReader(response.Value.Body))
+ {
+ jsonString = reader.ReadToEnd();
+ }
+
+ Dictionary>> pathDictionary
+ = JsonSerializer.Deserialize>>>(jsonString);
+
+ return Response.FromValue(
+ new PathSegment()
+ {
+ Continuation = response.Value.Continuation,
+ Paths = pathDictionary["paths"].Select(path => path.ToPathItem())
+ },
+ response.GetRawResponse());
+
+ }
+ catch (Exception ex)
+ {
+ Pipeline.LogException(ex);
+ throw;
+ }
+ finally
+ {
+ Pipeline.LogMethodExit(nameof(FileSystemClient));
+ }
+ }
+ }
+ #endregion List Paths
+
+ #region Create Directory
+ ///
+ /// The operation creates a directory in this file system.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// The path to the directory to create.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the
+ /// new file or directory.
+ ///
+ ///
+ /// Optional custom metadata to set for this file or directory.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access
+ /// permissions for the file owner, the file owning group, and others. Each class may be granted read,
+ /// write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit
+ /// octal notation (e.g. 0766) are supported.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account.
+ /// When creating a file or directory and the parent folder does not have a default ACL,
+ /// the umask restricts the permissions of the file or directory to be created. The resulting
+ /// permission is given by p bitwise-and ^u, where p is the permission and u is the umask. For example,
+ /// if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is
+ /// 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ /// in 4-digit octal notation (e.g. 0766).
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// newly created directory.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response CreateDirectory(
+ string path,
+ PathHttpHeaders? httpHeaders = default,
+ Metadata metadata = default,
+ string permissions = default,
+ string umask = default,
+ DataLakeRequestConditions accessConditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ DirectoryClient directoryClient = GetDirectoryClient(path);
+
+ Response response = directoryClient.Create(
+ httpHeaders,
+ metadata,
+ permissions,
+ umask,
+ accessConditions,
+ cancellationToken);
+
+ return Response.FromValue(
+ directoryClient,
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation creates a directory in this file system.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// The path to the directory to create.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the
+ /// new file or directory.
+ ///
+ ///
+ /// Optional custom metadata to set for this file or directory.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access
+ /// permissions for the file owner, the file owning group, and others. Each class may be granted read,
+ /// write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit
+ /// octal notation (e.g. 0766) are supported.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account.
+ /// When creating a file or directory and the parent folder does not have a default ACL,
+ /// the umask restricts the permissions of the file or directory to be created. The resulting
+ /// permission is given by p bitwise-and ^u, where p is the permission and u is the umask. For example,
+ /// if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is
+ /// 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ /// in 4-digit octal notation (e.g. 0766).
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// newly created directory.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> CreateDirectoryAsync(
+ string path,
+ PathHttpHeaders? httpHeaders = default,
+ Metadata metadata = default,
+ string permissions = default,
+ string umask = default,
+ DataLakeRequestConditions accessConditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ DirectoryClient directoryClient = GetDirectoryClient(path);
+
+ // Reuse the client created above (the sync overload does the same);
+ // previously this called GetDirectoryClient(path) a second time.
+ Response response = await directoryClient.CreateAsync(
+ httpHeaders,
+ metadata,
+ permissions,
+ umask,
+ accessConditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ directoryClient,
+ response.GetRawResponse());
+ }
+ #endregion Create Directory
+
+ #region Delete Directory
+ ///
+ /// The operation marks the specified directory
+ /// for deletion. The directory is later deleted during
+ /// garbage collection.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// The path to the directory to delete.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// deleting this directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A on successfully deleting.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response DeleteDirectory(
+ string path,
+ DataLakeRequestConditions accessConditions = default,
+ CancellationToken cancellationToken = default) =>
+ GetDirectoryClient(path).Delete(
+ recursive: true,
+ accessConditions,
+ cancellationToken);
+
+ ///
+ /// The operation marks the specified directory for
+ /// deletion. The directory is later deleted during garbage collection.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// The path to the directory to delete.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// deleting this directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A on successfully deleting.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task DeleteDirectoryAsync(
+ string path,
+ DataLakeRequestConditions accessConditions = default,
+ CancellationToken cancellationToken = default) =>
+ await GetDirectoryClient(path).DeleteAsync(
+ recursive: true,
+ accessConditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+ #endregion Delete Directory
+
+ #region Create File
+ ///
+ /// The operation creates a file or directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// The path to the file to create.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the
+ /// new file or directory..
+ ///
+ ///
+ /// Optional custom metadata to set for this file or directory..
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access
+ /// permissions for the file owner, the file owning group, and others. Each class may be granted read,
+ /// write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit
+ /// octal notation (e.g. 0766) are supported.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account.
+ /// When creating a file or directory and the parent folder does not have a default ACL,
+ /// the umask restricts the permissions of the file or directory to be created. The resulting
+ /// permission is given by p bitwise-and ^u, where p is the permission and u is the umask. For example,
+ /// if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is
+ /// 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ /// in 4-digit octal notation (e.g. 0766).
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory..
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// newly created page blob.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response CreateFile(
+ string path,
+ PathHttpHeaders? httpHeaders = default,
+ Metadata metadata = default,
+ string permissions = default,
+ string umask = default,
+ DataLakeRequestConditions accessConditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ FileClient fileClient = GetFileClient(path);
+
+ Response response = fileClient.Create(
+ httpHeaders,
+ metadata,
+ permissions,
+ umask,
+ accessConditions,
+ cancellationToken);
+
+ return Response.FromValue(
+ fileClient,
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation creates a file in this file system.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// The path to the file to create.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the
+ /// new file or directory.
+ ///
+ ///
+ /// Optional custom metadata to set for this file or directory.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access
+ /// permissions for the file owner, the file owning group, and others. Each class may be granted read,
+ /// write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit
+ /// octal notation (e.g. 0766) are supported.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account.
+ /// When creating a file or directory and the parent folder does not have a default ACL,
+ /// the umask restricts the permissions of the file or directory to be created. The resulting
+ /// permission is given by p bitwise-and ^u, where p is the permission and u is the umask. For example,
+ /// if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is
+ /// 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ /// in 4-digit octal notation (e.g. 0766).
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// newly created file.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> CreateFileAsync(
+ string path,
+ PathHttpHeaders? httpHeaders = default,
+ Metadata metadata = default,
+ string permissions = default,
+ string umask = default,
+ DataLakeRequestConditions accessConditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ FileClient fileClient = GetFileClient(path);
+
+ Response response = await fileClient.CreateAsync(
+ httpHeaders,
+ metadata,
+ permissions,
+ umask,
+ accessConditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ fileClient,
+ response.GetRawResponse());
+ }
+ #endregion Create File
+
+ #region Delete File
+ ///
+ /// The deletes a file in this file system.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// The path to the file to delete.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// deleting this file.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A on successfully deleting.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response DeleteFile(
+ string path,
+ DataLakeRequestConditions accessConditions = default,
+ CancellationToken cancellationToken = default) =>
+ GetFileClient(path).Delete(
+ accessConditions,
+ cancellationToken);
+
+ ///
+ /// The deletes a file in this file system.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// The path to the file to delete.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// deleting this file.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A on successfully deleting.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task DeleteFileAsync(
+ string path,
+ DataLakeRequestConditions accessConditions = default,
+ CancellationToken cancellationToken = default)
+ => await GetFileClient(path).DeleteAsync(
+ accessConditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ #endregion Delete File
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/DataLakeRestClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/DataLakeRestClient.cs
new file mode 100644
index 000000000000..caacd4fbae75
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Generated/DataLakeRestClient.cs
@@ -0,0 +1,4426 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// This file was automatically generated. Do not edit.
+
+#pragma warning disable IDE0016 // Null check can be simplified
+#pragma warning disable IDE0017 // Variable declaration can be inlined
+#pragma warning disable IDE0018 // Object initialization can be simplified
+#pragma warning disable SA1402 // File may only contain a single type
+
+#region Service
+namespace Azure.Storage.Files.DataLake
+{
+ ///
+ /// Azure Data Lake Storage REST API
+ /// Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
+ ///
+ internal static partial class DataLakeRestClient
+ {
+ #region Service operations
+ ///
+ /// Service operations for Azure Data Lake Storage REST API
+ ///
+ public static partial class Service
+ {
+ #region Service.ListFileSystemsAsync
+ ///
+ /// List filesystems and their properties in given account.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// Filters results to filesystems within the specified prefix.
+ /// Optional. When deleting a directory, the number of paths that are deleted with each invocation is limited. If the number of paths to be deleted exceeds this limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the delete operation to continue deleting the directory.
+ /// An optional value that specifies the maximum number of items to return. If omitted or greater than 5,000, the response will include up to 5,000 items.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// Whether to invoke the operation asynchronously. The default value is true.
+ /// The ClientDiagnostics instance used for operation reporting.
+ /// Operation name.
+ /// Cancellation token.
+ /// Azure.Response{Azure.Storage.Files.DataLake.Models.ServiceListFileSystemsResult}
+ public static async System.Threading.Tasks.ValueTask> ListFileSystemsAsync(
+ Azure.Core.Pipeline.ClientDiagnostics clientDiagnostics,
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string prefix = default,
+ string continuation = default,
+ int? maxResults = default,
+ string requestId = default,
+ int? timeout = default,
+ bool async = true,
+ string operationName = "Azure.Storage.Files.DataLake.ServiceClient.ListFileSystems",
+ System.Threading.CancellationToken cancellationToken = default)
+ {
+ Azure.Core.Pipeline.DiagnosticScope _scope = clientDiagnostics.CreateScope(operationName);
+ try
+ {
+ _scope.AddAttribute("url", resourceUri);
+ _scope.Start();
+ using (Azure.Core.HttpMessage _message = ListFileSystemsAsync_CreateMessage(
+ pipeline,
+ resourceUri,
+ prefix,
+ continuation,
+ maxResults,
+ requestId,
+ timeout))
+ {
+ if (async)
+ {
+ // Send the request asynchronously if we're being called via an async path
+ await pipeline.SendAsync(_message, cancellationToken).ConfigureAwait(false);
+ }
+ else
+ {
+ // Send the request synchronously through the API that blocks if we're being called via a sync path
+ // (this is safe because the Task will complete before the user can call Wait)
+ pipeline.Send(_message, cancellationToken);
+ }
+ Azure.Response _response = _message.Response;
+ cancellationToken.ThrowIfCancellationRequested();
+ return ListFileSystemsAsync_CreateResponse(_response);
+ }
+ }
+ catch (System.Exception ex)
+ {
+ _scope.Failed(ex);
+ throw;
+ }
+ finally
+ {
+ _scope.Dispose();
+ }
+ }
+
+ ///
+ /// Create the Service.ListFileSystemsAsync request.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// Filters results to filesystems within the specified prefix.
+ /// Optional. When deleting a directory, the number of paths that are deleted with each invocation is limited. If the number of paths to be deleted exceeds this limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the delete operation to continue deleting the directory.
+ /// An optional value that specifies the maximum number of items to return. If omitted or greater than 5,000, the response will include up to 5,000 items.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// The Service.ListFileSystemsAsync Message.
+ internal static Azure.Core.HttpMessage ListFileSystemsAsync_CreateMessage(
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string prefix = default,
+ string continuation = default,
+ int? maxResults = default,
+ string requestId = default,
+ int? timeout = default)
+ {
+ // Validation
+ if (resourceUri == null)
+ {
+ throw new System.ArgumentNullException(nameof(resourceUri));
+ }
+
+ // Create the request
+ Azure.Core.HttpMessage _message = pipeline.CreateMessage();
+ Azure.Core.Request _request = _message.Request;
+
+ // Set the endpoint
+ _request.Method = Azure.Core.RequestMethod.Get;
+ _request.Uri.Reset(resourceUri);
+ _request.Uri.AppendQuery("resource", "account", escapeValue: false);
+ if (prefix != null) { _request.Uri.AppendQuery("prefix", prefix); }
+ if (continuation != null) { _request.Uri.AppendQuery("continuation", continuation); }
+ if (maxResults != null) { _request.Uri.AppendQuery("maxResults", maxResults.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+ if (timeout != null) { _request.Uri.AppendQuery("timeout", timeout.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+
+ // Add request headers
+ _request.Headers.SetValue("x-ms-version", "2019-02-02");
+ if (requestId != null) { _request.Headers.SetValue("x-ms-client-request-id", requestId); }
+
+ return _message;
+ }
+
+ ///
+ /// Create the Service.ListFileSystemsAsync response or throw a failure exception.
+ ///
+ /// The raw Response to deserialize.
+ /// The Service.ListFileSystemsAsync Azure.Response{Azure.Storage.Files.DataLake.Models.ServiceListFileSystemsResult}.
+ internal static Azure.Response ListFileSystemsAsync_CreateResponse(
+ Azure.Response response)
+ {
+ // Process the response
+ switch (response.Status)
+ {
+ case 200:
+ {
+ // Create the result
+ System.Xml.Linq.XDocument _xml = System.Xml.Linq.XDocument.Load(response.ContentStream, System.Xml.Linq.LoadOptions.PreserveWhitespace);
+ Azure.Storage.Files.DataLake.Models.ServiceListFileSystemsResult _value = new Azure.Storage.Files.DataLake.Models.ServiceListFileSystemsResult();
+ _value.Body = Azure.Storage.Files.DataLake.Models.FileSystemList.FromXml(_xml.Root);
+
+ // Get response headers
+ string _header;
+ if (response.Headers.TryGetValue("x-ms-continuation", out _header))
+ {
+ _value.Continuation = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Type", out _header))
+ {
+ _value.ContentType = _header;
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ case 304:
+ {
+ return new Azure.NoBodyResponse(response);
+ }
+ default:
+ {
+ // Create the result
+ string _value;
+ using (System.IO.StreamReader _streamReader = new System.IO.StreamReader(response.ContentStream))
+ {
+ _value = _streamReader.ReadToEnd();
+ }
+
+ throw _value.CreateException(response);
+ }
+ }
+ }
+ #endregion Service.ListFileSystemsAsync
+ }
+ #endregion Service operations
+
+ #region FileSystem operations
+ ///
+ /// FileSystem operations for Azure Data Lake Storage REST API
+ ///
+ public static partial class FileSystem
+ {
+ #region FileSystem.CreateAsync
+ ///
+ /// Create a FileSystem rooted at the specified location. If the FileSystem already exists, the operation fails. This operation does not support conditional HTTP requests.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// Optional. User-defined properties to be stored with the filesystem, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded string. Note that the string may only contain ASCII characters in the ISO-8859-1 character set. If the filesystem exists, any properties not included in the list will be removed. All properties are removed if the header is omitted. To merge new and existing properties, first get all existing properties and the current E-Tag, then make a conditional request with the E-Tag and include values for all properties.
+ /// Whether to invoke the operation asynchronously. The default value is true.
+ /// The ClientDiagnostics instance used for operation reporting.
+ /// Operation name.
+ /// Cancellation token.
+ /// Azure.Response{Azure.Storage.Files.DataLake.Models.FileSystemCreateResult}
+ public static async System.Threading.Tasks.ValueTask> CreateAsync(
+ Azure.Core.Pipeline.ClientDiagnostics clientDiagnostics,
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string requestId = default,
+ int? timeout = default,
+ string properties = default,
+ bool async = true,
+ string operationName = "Azure.Storage.Files.DataLake.FileSystemClient.Create",
+ System.Threading.CancellationToken cancellationToken = default)
+ {
+ Azure.Core.Pipeline.DiagnosticScope _scope = clientDiagnostics.CreateScope(operationName);
+ try
+ {
+ _scope.AddAttribute("url", resourceUri);
+ _scope.Start();
+ using (Azure.Core.HttpMessage _message = CreateAsync_CreateMessage(
+ pipeline,
+ resourceUri,
+ requestId,
+ timeout,
+ properties))
+ {
+ if (async)
+ {
+ // Send the request asynchronously if we're being called via an async path
+ await pipeline.SendAsync(_message, cancellationToken).ConfigureAwait(false);
+ }
+ else
+ {
+ // Send the request synchronously through the API that blocks if we're being called via a sync path
+ // (this is safe because the Task will complete before the user can call Wait)
+ pipeline.Send(_message, cancellationToken);
+ }
+ Azure.Response _response = _message.Response;
+ cancellationToken.ThrowIfCancellationRequested();
+ return CreateAsync_CreateResponse(_response);
+ }
+ }
+ catch (System.Exception ex)
+ {
+ // Record the failure on the diagnostic scope before rethrowing.
+ _scope.Failed(ex);
+ throw;
+ }
+ finally
+ {
+ _scope.Dispose();
+ }
+ }
+
+ ///
+ /// Create the FileSystem.CreateAsync request.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// Optional. User-defined properties to be stored with the filesystem, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded string. Note that the string may only contain ASCII characters in the ISO-8859-1 character set. If the filesystem exists, any properties not included in the list will be removed. All properties are removed if the header is omitted. To merge new and existing properties, first get all existing properties and the current E-Tag, then make a conditional request with the E-Tag and include values for all properties.
+ /// The FileSystem.CreateAsync Message.
+ internal static Azure.Core.HttpMessage CreateAsync_CreateMessage(
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string requestId = default,
+ int? timeout = default,
+ string properties = default)
+ {
+ // Validation
+ if (resourceUri == null)
+ {
+ throw new System.ArgumentNullException(nameof(resourceUri));
+ }
+
+ // Create the request
+ Azure.Core.HttpMessage _message = pipeline.CreateMessage();
+ Azure.Core.Request _request = _message.Request;
+
+ // Set the endpoint
+ _request.Method = Azure.Core.RequestMethod.Put;
+ _request.Uri.Reset(resourceUri);
+ _request.Uri.AppendQuery("resource", "filesystem", escapeValue: false);
+ if (timeout != null) { _request.Uri.AppendQuery("timeout", timeout.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+
+ // Add request headers
+ _request.Headers.SetValue("x-ms-version", "2019-02-02");
+ if (requestId != null) { _request.Headers.SetValue("x-ms-client-request-id", requestId); }
+ if (properties != null) { _request.Headers.SetValue("x-ms-properties", properties); }
+
+ return _message;
+ }
+
+ ///
+ /// Create the FileSystem.CreateAsync response or throw a failure exception.
+ ///
+ /// The raw Response.
+ /// The FileSystem.CreateAsync Azure.Response{Azure.Storage.Files.DataLake.Models.FileSystemCreateResult}.
+ internal static Azure.Response CreateAsync_CreateResponse(
+ Azure.Response response)
+ {
+ // Process the response
+ switch (response.Status)
+ {
+ case 201:
+ {
+ // 201 Created: build the result from response headers (no body).
+ Azure.Storage.Files.DataLake.Models.FileSystemCreateResult _value = new Azure.Storage.Files.DataLake.Models.FileSystemCreateResult();
+
+ // Get response headers
+ string _header;
+ if (response.Headers.TryGetValue("ETag", out _header))
+ {
+ _value.ETag = new Azure.ETag(_header);
+ }
+ if (response.Headers.TryGetValue("Last-Modified", out _header))
+ {
+ _value.LastModified = System.DateTimeOffset.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("x-ms-namespace-enabled", out _header))
+ {
+ _value.NamespaceEnabled = _header;
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ default:
+ {
+ // Read the error body as text and convert it into an exception.
+ string _value;
+ using (System.IO.StreamReader _streamReader = new System.IO.StreamReader(response.ContentStream))
+ {
+ _value = _streamReader.ReadToEnd();
+ }
+
+ throw _value.CreateException(response);
+ }
+ }
+ }
+ #endregion FileSystem.CreateAsync
+
+ #region FileSystem.SetPropertiesAsync
+ ///
+ /// Set properties for the FileSystem. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// Optional. User-defined properties to be stored with the filesystem, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded string. Note that the string may only contain ASCII characters in the ISO-8859-1 character set. If the filesystem exists, any properties not included in the list will be removed. All properties are removed if the header is omitted. To merge new and existing properties, first get all existing properties and the current E-Tag, then make a conditional request with the E-Tag and include values for all properties.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// Whether to invoke the operation asynchronously. The default value is true.
+ /// The ClientDiagnostics instance used for operation reporting.
+ /// Operation name.
+ /// Cancellation token.
+ /// Azure.Response{Azure.Storage.Files.DataLake.Models.FileSystemSetPropertiesResult}
+ public static async System.Threading.Tasks.ValueTask> SetPropertiesAsync(
+ Azure.Core.Pipeline.ClientDiagnostics clientDiagnostics,
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string requestId = default,
+ int? timeout = default,
+ string properties = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default,
+ bool async = true,
+ string operationName = "Azure.Storage.Files.DataLake.FileSystemClient.SetProperties",
+ System.Threading.CancellationToken cancellationToken = default)
+ {
+ Azure.Core.Pipeline.DiagnosticScope _scope = clientDiagnostics.CreateScope(operationName);
+ try
+ {
+ _scope.AddAttribute("url", resourceUri);
+ _scope.Start();
+ using (Azure.Core.HttpMessage _message = SetPropertiesAsync_CreateMessage(
+ pipeline,
+ resourceUri,
+ requestId,
+ timeout,
+ properties,
+ ifModifiedSince,
+ ifUnmodifiedSince))
+ {
+ if (async)
+ {
+ // Send the request asynchronously if we're being called via an async path
+ await pipeline.SendAsync(_message, cancellationToken).ConfigureAwait(false);
+ }
+ else
+ {
+ // Send the request synchronously through the API that blocks if we're being called via a sync path
+ // (this is safe because the Task will complete before the user can call Wait)
+ pipeline.Send(_message, cancellationToken);
+ }
+ Azure.Response _response = _message.Response;
+ cancellationToken.ThrowIfCancellationRequested();
+ return SetPropertiesAsync_CreateResponse(_response);
+ }
+ }
+ catch (System.Exception ex)
+ {
+ // Record the failure on the diagnostic scope before rethrowing.
+ _scope.Failed(ex);
+ throw;
+ }
+ finally
+ {
+ _scope.Dispose();
+ }
+ }
+
+ ///
+ /// Create the FileSystem.SetPropertiesAsync request.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// Optional. User-defined properties to be stored with the filesystem, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded string. Note that the string may only contain ASCII characters in the ISO-8859-1 character set. If the filesystem exists, any properties not included in the list will be removed. All properties are removed if the header is omitted. To merge new and existing properties, first get all existing properties and the current E-Tag, then make a conditional request with the E-Tag and include values for all properties.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// The FileSystem.SetPropertiesAsync Message.
+ internal static Azure.Core.HttpMessage SetPropertiesAsync_CreateMessage(
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string requestId = default,
+ int? timeout = default,
+ string properties = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default)
+ {
+ // Validation
+ if (resourceUri == null)
+ {
+ throw new System.ArgumentNullException(nameof(resourceUri));
+ }
+
+ // Create the request
+ Azure.Core.HttpMessage _message = pipeline.CreateMessage();
+ Azure.Core.Request _request = _message.Request;
+
+ // Set the endpoint
+ _request.Method = Azure.Core.RequestMethod.Patch;
+ _request.Uri.Reset(resourceUri);
+ _request.Uri.AppendQuery("resource", "filesystem", escapeValue: false);
+ if (timeout != null) { _request.Uri.AppendQuery("timeout", timeout.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+
+ // Add request headers ("R" format produces the RFC1123 date form HTTP requires)
+ _request.Headers.SetValue("x-ms-version", "2019-02-02");
+ if (requestId != null) { _request.Headers.SetValue("x-ms-client-request-id", requestId); }
+ if (properties != null) { _request.Headers.SetValue("x-ms-properties", properties); }
+ if (ifModifiedSince != null) { _request.Headers.SetValue("If-Modified-Since", ifModifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+ if (ifUnmodifiedSince != null) { _request.Headers.SetValue("If-Unmodified-Since", ifUnmodifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+
+ return _message;
+ }
+
+ ///
+ /// Create the FileSystem.SetPropertiesAsync response or throw a failure exception.
+ ///
+ /// The raw Response.
+ /// The FileSystem.SetPropertiesAsync Azure.Response{Azure.Storage.Files.DataLake.Models.FileSystemSetPropertiesResult}.
+ internal static Azure.Response SetPropertiesAsync_CreateResponse(
+ Azure.Response response)
+ {
+ // Process the response
+ switch (response.Status)
+ {
+ case 200:
+ {
+ // 200 OK: build the result from response headers (no body).
+ Azure.Storage.Files.DataLake.Models.FileSystemSetPropertiesResult _value = new Azure.Storage.Files.DataLake.Models.FileSystemSetPropertiesResult();
+
+ // Get response headers
+ string _header;
+ if (response.Headers.TryGetValue("ETag", out _header))
+ {
+ _value.ETag = new Azure.ETag(_header);
+ }
+ if (response.Headers.TryGetValue("Last-Modified", out _header))
+ {
+ _value.LastModified = System.DateTimeOffset.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ default:
+ {
+ // Read the error body as text and convert it into an exception.
+ string _value;
+ using (System.IO.StreamReader _streamReader = new System.IO.StreamReader(response.ContentStream))
+ {
+ _value = _streamReader.ReadToEnd();
+ }
+
+ throw _value.CreateException(response);
+ }
+ }
+ }
+ #endregion FileSystem.SetPropertiesAsync
+
+ #region FileSystem.GetPropertiesAsync
+ ///
+ /// All system and user-defined filesystem properties are specified in the response headers.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// Whether to invoke the operation asynchronously. The default value is true.
+ /// The ClientDiagnostics instance used for operation reporting.
+ /// Operation name.
+ /// Cancellation token.
+ /// Azure.Response{Azure.Storage.Files.DataLake.Models.FileSystemGetPropertiesResult}
+ public static async System.Threading.Tasks.ValueTask> GetPropertiesAsync(
+ Azure.Core.Pipeline.ClientDiagnostics clientDiagnostics,
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string requestId = default,
+ int? timeout = default,
+ bool async = true,
+ string operationName = "Azure.Storage.Files.DataLake.FileSystemClient.GetProperties",
+ System.Threading.CancellationToken cancellationToken = default)
+ {
+ Azure.Core.Pipeline.DiagnosticScope _scope = clientDiagnostics.CreateScope(operationName);
+ try
+ {
+ _scope.AddAttribute("url", resourceUri);
+ _scope.Start();
+ using (Azure.Core.HttpMessage _message = GetPropertiesAsync_CreateMessage(
+ pipeline,
+ resourceUri,
+ requestId,
+ timeout))
+ {
+ if (async)
+ {
+ // Send the request asynchronously if we're being called via an async path
+ await pipeline.SendAsync(_message, cancellationToken).ConfigureAwait(false);
+ }
+ else
+ {
+ // Send the request synchronously through the API that blocks if we're being called via a sync path
+ // (this is safe because the Task will complete before the user can call Wait)
+ pipeline.Send(_message, cancellationToken);
+ }
+ Azure.Response _response = _message.Response;
+ cancellationToken.ThrowIfCancellationRequested();
+ return GetPropertiesAsync_CreateResponse(_response);
+ }
+ }
+ catch (System.Exception ex)
+ {
+ // Record the failure on the diagnostic scope before rethrowing.
+ _scope.Failed(ex);
+ throw;
+ }
+ finally
+ {
+ _scope.Dispose();
+ }
+ }
+
+ ///
+ /// Create the FileSystem.GetPropertiesAsync request.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// The FileSystem.GetPropertiesAsync Message.
+ internal static Azure.Core.HttpMessage GetPropertiesAsync_CreateMessage(
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string requestId = default,
+ int? timeout = default)
+ {
+ // Validation
+ if (resourceUri == null)
+ {
+ throw new System.ArgumentNullException(nameof(resourceUri));
+ }
+
+ // Create the request
+ Azure.Core.HttpMessage _message = pipeline.CreateMessage();
+ Azure.Core.Request _request = _message.Request;
+
+ // Set the endpoint (HEAD: properties are returned in headers only, no body)
+ _request.Method = Azure.Core.RequestMethod.Head;
+ _request.Uri.Reset(resourceUri);
+ _request.Uri.AppendQuery("resource", "filesystem", escapeValue: false);
+ if (timeout != null) { _request.Uri.AppendQuery("timeout", timeout.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+
+ // Add request headers
+ _request.Headers.SetValue("x-ms-version", "2019-02-02");
+ if (requestId != null) { _request.Headers.SetValue("x-ms-client-request-id", requestId); }
+
+ return _message;
+ }
+
+ ///
+ /// Create the FileSystem.GetPropertiesAsync response or throw a failure exception.
+ ///
+ /// The raw Response.
+ /// The FileSystem.GetPropertiesAsync Azure.Response{Azure.Storage.Files.DataLake.Models.FileSystemGetPropertiesResult}.
+ internal static Azure.Response GetPropertiesAsync_CreateResponse(
+ Azure.Response response)
+ {
+ // Process the response
+ switch (response.Status)
+ {
+ case 200:
+ {
+ // 200 OK: build the result from response headers (HEAD request, no body).
+ Azure.Storage.Files.DataLake.Models.FileSystemGetPropertiesResult _value = new Azure.Storage.Files.DataLake.Models.FileSystemGetPropertiesResult();
+
+ // Get response headers
+ string _header;
+ if (response.Headers.TryGetValue("ETag", out _header))
+ {
+ _value.ETag = new Azure.ETag(_header);
+ }
+ if (response.Headers.TryGetValue("Last-Modified", out _header))
+ {
+ _value.LastModified = System.DateTimeOffset.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("x-ms-properties", out _header))
+ {
+ _value.Properties = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-namespace-enabled", out _header))
+ {
+ _value.NamespaceEnabled = _header;
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ case 304:
+ {
+ // 304 Not Modified: surface the raw response with no deserialized body.
+ return new Azure.NoBodyResponse(response);
+ }
+ default:
+ {
+ // Read the error body as text and convert it into an exception.
+ string _value;
+ using (System.IO.StreamReader _streamReader = new System.IO.StreamReader(response.ContentStream))
+ {
+ _value = _streamReader.ReadToEnd();
+ }
+
+ throw _value.CreateException(response);
+ }
+ }
+ }
+ #endregion FileSystem.GetPropertiesAsync
+
+ #region FileSystem.DeleteAsync
+ ///
+ /// Marks the FileSystem for deletion. When a FileSystem is deleted, a FileSystem with the same identifier cannot be created for at least 30 seconds. While the filesystem is being deleted, attempts to create a filesystem with the same identifier will fail with status code 409 (Conflict), with the service returning additional error information indicating that the filesystem is being deleted. All other operations, including operations on any files or directories within the filesystem, will fail with status code 404 (Not Found) while the filesystem is being deleted. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// Whether to invoke the operation asynchronously. The default value is true.
+ /// The ClientDiagnostics instance used for operation reporting.
+ /// Operation name.
+ /// Cancellation token.
+ /// Azure.Response
+ public static async System.Threading.Tasks.ValueTask DeleteAsync(
+ Azure.Core.Pipeline.ClientDiagnostics clientDiagnostics,
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string requestId = default,
+ int? timeout = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default,
+ bool async = true,
+ string operationName = "Azure.Storage.Files.DataLake.FileSystemClient.Delete",
+ System.Threading.CancellationToken cancellationToken = default)
+ {
+ Azure.Core.Pipeline.DiagnosticScope _scope = clientDiagnostics.CreateScope(operationName);
+ try
+ {
+ _scope.AddAttribute("url", resourceUri);
+ _scope.Start();
+ using (Azure.Core.HttpMessage _message = DeleteAsync_CreateMessage(
+ pipeline,
+ resourceUri,
+ requestId,
+ timeout,
+ ifModifiedSince,
+ ifUnmodifiedSince))
+ {
+ if (async)
+ {
+ // Send the request asynchronously if we're being called via an async path
+ await pipeline.SendAsync(_message, cancellationToken).ConfigureAwait(false);
+ }
+ else
+ {
+ // Send the request synchronously through the API that blocks if we're being called via a sync path
+ // (this is safe because the Task will complete before the user can call Wait)
+ pipeline.Send(_message, cancellationToken);
+ }
+ Azure.Response _response = _message.Response;
+ cancellationToken.ThrowIfCancellationRequested();
+ return DeleteAsync_CreateResponse(_response);
+ }
+ }
+ catch (System.Exception ex)
+ {
+ // Record the failure on the diagnostic scope before rethrowing.
+ _scope.Failed(ex);
+ throw;
+ }
+ finally
+ {
+ _scope.Dispose();
+ }
+ }
+
+ ///
+ /// Create the FileSystem.DeleteAsync request.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// The FileSystem.DeleteAsync Message.
+ internal static Azure.Core.HttpMessage DeleteAsync_CreateMessage(
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string requestId = default,
+ int? timeout = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default)
+ {
+ // Validation
+ if (resourceUri == null)
+ {
+ throw new System.ArgumentNullException(nameof(resourceUri));
+ }
+
+ // Create the request
+ Azure.Core.HttpMessage _message = pipeline.CreateMessage();
+ Azure.Core.Request _request = _message.Request;
+
+ // Set the endpoint
+ _request.Method = Azure.Core.RequestMethod.Delete;
+ _request.Uri.Reset(resourceUri);
+ _request.Uri.AppendQuery("resource", "filesystem", escapeValue: false);
+ if (timeout != null) { _request.Uri.AppendQuery("timeout", timeout.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+
+ // Add request headers ("R" format produces the RFC1123 date form HTTP requires)
+ _request.Headers.SetValue("x-ms-version", "2019-02-02");
+ if (requestId != null) { _request.Headers.SetValue("x-ms-client-request-id", requestId); }
+ if (ifModifiedSince != null) { _request.Headers.SetValue("If-Modified-Since", ifModifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+ if (ifUnmodifiedSince != null) { _request.Headers.SetValue("If-Unmodified-Since", ifUnmodifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+
+ return _message;
+ }
+
+ ///
+ /// Create the FileSystem.DeleteAsync response or throw a failure exception.
+ ///
+ /// The raw Response.
+ /// The FileSystem.DeleteAsync Azure.Response.
+ internal static Azure.Response DeleteAsync_CreateResponse(
+ Azure.Response response)
+ {
+ // Process the response
+ switch (response.Status)
+ {
+ case 202:
+ {
+ // 202 Accepted: the filesystem has been marked for deletion; no body to parse.
+ return response;
+ }
+ default:
+ {
+ // Read the error body as text and convert it into an exception.
+ string _value;
+ using (System.IO.StreamReader _streamReader = new System.IO.StreamReader(response.ContentStream))
+ {
+ _value = _streamReader.ReadToEnd();
+ }
+
+ throw _value.CreateException(response);
+ }
+ }
+ }
+ #endregion FileSystem.DeleteAsync
+
+ #region FileSystem.ListPathsAsync
+ ///
+ /// List FileSystem paths and their properties.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// Required
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// Optional. When deleting a directory, the number of paths that are deleted with each invocation is limited. If the number of paths to be deleted exceeds this limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the delete operation to continue deleting the directory.
+ /// Optional. Filters results to paths within the specified directory. An error occurs if the directory does not exist.
+ /// An optional value that specifies the maximum number of items to return. If omitted or greater than 5,000, the response will include up to 5,000 items.
+ /// Optional. Valid only when Hierarchical Namespace is enabled for the account. If "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If "false", the values will be returned as Azure Active Directory Object IDs. The default value is false. Note that group and application Object IDs are not translated because they do not have unique friendly names.
+ /// Whether to invoke the operation asynchronously. The default value is true.
+ /// The ClientDiagnostics instance used for operation reporting.
+ /// Operation name.
+ /// Cancellation token.
+ /// Azure.Response{Azure.Storage.Files.DataLake.Models.FileSystemListPathsResult}
+ public static async System.Threading.Tasks.ValueTask> ListPathsAsync(
+ Azure.Core.Pipeline.ClientDiagnostics clientDiagnostics,
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ bool recursive,
+ string requestId = default,
+ int? timeout = default,
+ string continuation = default,
+ string path = default,
+ int? maxResults = default,
+ bool? upn = default,
+ bool async = true,
+ string operationName = "Azure.Storage.Files.DataLake.FileSystemClient.ListPaths",
+ System.Threading.CancellationToken cancellationToken = default)
+ {
+ Azure.Core.Pipeline.DiagnosticScope _scope = clientDiagnostics.CreateScope(operationName);
+ try
+ {
+ _scope.AddAttribute("url", resourceUri);
+ _scope.Start();
+ using (Azure.Core.HttpMessage _message = ListPathsAsync_CreateMessage(
+ pipeline,
+ resourceUri,
+ recursive,
+ requestId,
+ timeout,
+ continuation,
+ path,
+ maxResults,
+ upn))
+ {
+ if (async)
+ {
+ // Send the request asynchronously if we're being called via an async path
+ await pipeline.SendAsync(_message, cancellationToken).ConfigureAwait(false);
+ }
+ else
+ {
+ // Send the request synchronously through the API that blocks if we're being called via a sync path
+ // (this is safe because the Task will complete before the user can call Wait)
+ pipeline.Send(_message, cancellationToken);
+ }
+ Azure.Response _response = _message.Response;
+ cancellationToken.ThrowIfCancellationRequested();
+ return ListPathsAsync_CreateResponse(_response);
+ }
+ }
+ catch (System.Exception ex)
+ {
+ // Record the failure on the diagnostic scope before rethrowing.
+ _scope.Failed(ex);
+ throw;
+ }
+ finally
+ {
+ _scope.Dispose();
+ }
+ }
+
+ ///
+ /// Create the FileSystem.ListPathsAsync request.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// Required
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// Optional. When deleting a directory, the number of paths that are deleted with each invocation is limited. If the number of paths to be deleted exceeds this limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the delete operation to continue deleting the directory.
+ /// Optional. Filters results to paths within the specified directory. An error occurs if the directory does not exist.
+ /// An optional value that specifies the maximum number of items to return. If omitted or greater than 5,000, the response will include up to 5,000 items.
+ /// Optional. Valid only when Hierarchical Namespace is enabled for the account. If "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If "false", the values will be returned as Azure Active Directory Object IDs. The default value is false. Note that group and application Object IDs are not translated because they do not have unique friendly names.
+ /// The FileSystem.ListPathsAsync Message.
+ internal static Azure.Core.HttpMessage ListPathsAsync_CreateMessage(
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ bool recursive,
+ string requestId = default,
+ int? timeout = default,
+ string continuation = default,
+ string path = default,
+ int? maxResults = default,
+ bool? upn = default)
+ {
+ // Validation
+ if (resourceUri == null)
+ {
+ throw new System.ArgumentNullException(nameof(resourceUri));
+ }
+
+ // Create the request
+ Azure.Core.HttpMessage _message = pipeline.CreateMessage();
+ Azure.Core.Request _request = _message.Request;
+
+ // Set the endpoint
+ _request.Method = Azure.Core.RequestMethod.Get;
+ _request.Uri.Reset(resourceUri);
+ _request.Uri.AppendQuery("resource", "filesystem", escapeValue: false);
+
+ // The service expects lowercase "true"/"false" for booleans, hence ToLowerInvariant.
+ #pragma warning disable CA1308 // Normalize strings to uppercase
+ _request.Uri.AppendQuery("recursive", recursive.ToString(System.Globalization.CultureInfo.InvariantCulture).ToLowerInvariant());
+ #pragma warning restore CA1308 // Normalize strings to uppercase
+
+ if (timeout != null) { _request.Uri.AppendQuery("timeout", timeout.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+ if (continuation != null) { _request.Uri.AppendQuery("continuation", continuation); }
+ if (path != null) { _request.Uri.AppendQuery("directory", path); }
+ if (maxResults != null) { _request.Uri.AppendQuery("maxResults", maxResults.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+ if (upn != null) {
+ #pragma warning disable CA1308 // Normalize strings to uppercase
+ _request.Uri.AppendQuery("upn", upn.Value.ToString(System.Globalization.CultureInfo.InvariantCulture).ToLowerInvariant());
+ #pragma warning restore CA1308 // Normalize strings to uppercase
+ }
+
+ // Add request headers
+ _request.Headers.SetValue("x-ms-version", "2019-02-02");
+ if (requestId != null) { _request.Headers.SetValue("x-ms-client-request-id", requestId); }
+
+ return _message;
+ }
+
+ ///
+ /// Create the FileSystem.ListPathsAsync response or throw a failure exception.
+ ///
+ /// The raw Response.
+ /// The FileSystem.ListPathsAsync Azure.Response{Azure.Storage.Files.DataLake.Models.FileSystemListPathsResult}.
+ internal static Azure.Response ListPathsAsync_CreateResponse(
+ Azure.Response response)
+ {
+ // Process the response
+ switch (response.Status)
+ {
+ case 200:
+ {
+ // 200 OK: the body is the path listing; it is surfaced as a raw stream
+ // together with the ETag/Last-Modified/continuation paging headers.
+ // Create the result
+ Azure.Storage.Files.DataLake.Models.FileSystemListPathsResult _value = new Azure.Storage.Files.DataLake.Models.FileSystemListPathsResult();
+ _value.Body = response.ContentStream; // You should manually wrap with RetriableStream!
+
+ // Get response headers
+ string _header;
+ if (response.Headers.TryGetValue("ETag", out _header))
+ {
+ _value.ETag = new Azure.ETag(_header);
+ }
+ if (response.Headers.TryGetValue("Last-Modified", out _header))
+ {
+ _value.LastModified = System.DateTimeOffset.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("x-ms-continuation", out _header))
+ {
+ _value.Continuation = _header;
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ case 304:
+ {
+ // 304 Not Modified: conditional request short-circuited; no body to parse.
+ return new Azure.NoBodyResponse(response);
+ }
+ default:
+ {
+ // Any other status is a failure: read the error payload and throw.
+ // Create the result
+ string _value;
+ using (System.IO.StreamReader _streamReader = new System.IO.StreamReader(response.ContentStream))
+ {
+ _value = _streamReader.ReadToEnd();
+ }
+
+ throw _value.CreateException(response);
+ }
+ }
+ }
+ #endregion FileSystem.ListPathsAsync
+ }
+ #endregion FileSystem operations
+
+ #region Path operations
+ ///
+ /// Path operations for Azure Data Lake Storage REST API
+ ///
+ public static partial class Path
+ {
+ #region Path.CreateAsync
+ ///
+ /// Create or rename a file or directory. By default, the destination is overwritten and if the destination already exists and has a lease the lease is broken. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). To fail if the destination already exists, use a conditional request with If-None-Match: "*".
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// Required only for Create File and Create Directory. The value must be "file" or "directory".
+ /// Optional. When deleting a directory, the number of paths that are deleted with each invocation is limited. If the number of paths to be deleted exceeds this limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the delete operation to continue deleting the directory.
+ /// Optional. Valid only when namespace is enabled. This parameter determines the behavior of the rename operation. The value must be "legacy" or "posix", and the default value will be "posix".
+ /// Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request.
+ /// Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
+ /// Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read request.
+ /// Optional. Sets the blob's Content-Disposition header.
+ /// Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request.
+ /// An optional file or directory to be renamed. The value must have the following format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties will overwrite the existing properties; otherwise, the existing properties will be preserved. This value must be a URL percent-encoded string. Note that the string may only contain ASCII characters in the ISO-8859-1 character set.
+ /// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ /// A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match.
+ /// Optional. User-defined properties to be stored with the filesystem, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded string. Note that the string may only contain ASCII characters in the ISO-8859-1 character set. If the filesystem exists, any properties not included in the list will be removed. All properties are removed if the header is omitted. To merge new and existing properties, first get all existing properties and the current E-Tag, then make a conditional request with the E-Tag and include values for all properties.
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file owner, the file owning group, and others. Each class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. When creating a file or directory and the parent folder does not have a default ACL, the umask restricts the permissions of the file or directory to be created. The resulting permission is given by p bitwise and not u, where p is the permission and u is the umask. For example, if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified in 4-digit octal notation (e.g. 0766).
+ /// Specify an ETag value to operate only on blobs with a matching value.
+ /// Specify an ETag value to operate only on blobs without a matching value.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// Specify an ETag value to operate only on blobs with a matching value.
+ /// Specify an ETag value to operate only on blobs without a matching value.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// Whether to invoke the operation asynchronously. The default value is true.
+ /// The ClientDiagnostics instance used for operation reporting.
+ /// Operation name.
+ /// Cancellation token.
+ /// Azure.Response{Azure.Storage.Files.DataLake.Models.PathCreateResult}
+ public static async System.Threading.Tasks.ValueTask> CreateAsync(
+ Azure.Core.Pipeline.ClientDiagnostics clientDiagnostics,
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string requestId = default,
+ int? timeout = default,
+ Azure.Storage.Files.DataLake.Models.PathResourceType? resource = default,
+ string continuation = default,
+ Azure.Storage.Files.DataLake.Models.PathRenameMode? mode = default,
+ string cacheControl = default,
+ string contentEncoding = default,
+ string contentLanguage = default,
+ string contentDisposition = default,
+ string contentType = default,
+ string renameSource = default,
+ string leaseId = default,
+ string sourceLeaseId = default,
+ string properties = default,
+ string permissions = default,
+ string umask = default,
+ Azure.ETag? ifMatch = default,
+ Azure.ETag? ifNoneMatch = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default,
+ Azure.ETag? sourceIfMatch = default,
+ Azure.ETag? sourceIfNoneMatch = default,
+ System.DateTimeOffset? sourceIfModifiedSince = default,
+ System.DateTimeOffset? sourceIfUnmodifiedSince = default,
+ bool async = true,
+ string operationName = "Azure.Storage.Files.DataLake.PathClient.Create",
+ System.Threading.CancellationToken cancellationToken = default)
+ {
+ // Wrap the whole operation in a diagnostic scope so failures are reported with the operation name.
+ Azure.Core.Pipeline.DiagnosticScope _scope = clientDiagnostics.CreateScope(operationName);
+ try
+ {
+ _scope.AddAttribute("url", resourceUri);
+ _scope.Start();
+ using (Azure.Core.HttpMessage _message = CreateAsync_CreateMessage(
+ pipeline,
+ resourceUri,
+ requestId,
+ timeout,
+ resource,
+ continuation,
+ mode,
+ cacheControl,
+ contentEncoding,
+ contentLanguage,
+ contentDisposition,
+ contentType,
+ renameSource,
+ leaseId,
+ sourceLeaseId,
+ properties,
+ permissions,
+ umask,
+ ifMatch,
+ ifNoneMatch,
+ ifModifiedSince,
+ ifUnmodifiedSince,
+ sourceIfMatch,
+ sourceIfNoneMatch,
+ sourceIfModifiedSince,
+ sourceIfUnmodifiedSince))
+ {
+ if (async)
+ {
+ // Send the request asynchronously if we're being called via an async path
+ await pipeline.SendAsync(_message, cancellationToken).ConfigureAwait(false);
+ }
+ else
+ {
+ // Send the request synchronously through the API that blocks if we're being called via a sync path
+ // (this is safe because the Task will complete before the user can call Wait)
+ pipeline.Send(_message, cancellationToken);
+ }
+ Azure.Response _response = _message.Response;
+ cancellationToken.ThrowIfCancellationRequested();
+ return CreateAsync_CreateResponse(_response);
+ }
+ }
+ catch (System.Exception ex)
+ {
+ _scope.Failed(ex);
+ throw;
+ }
+ finally
+ {
+ _scope.Dispose();
+ }
+ }
+
+ ///
+ /// Create the Path.CreateAsync request.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// Required only for Create File and Create Directory. The value must be "file" or "directory".
+ /// Optional. When deleting a directory, the number of paths that are deleted with each invocation is limited. If the number of paths to be deleted exceeds this limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the delete operation to continue deleting the directory.
+ /// Optional. Valid only when namespace is enabled. This parameter determines the behavior of the rename operation. The value must be "legacy" or "posix", and the default value will be "posix".
+ /// Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request.
+ /// Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
+ /// Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read request.
+ /// Optional. Sets the blob's Content-Disposition header.
+ /// Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request.
+ /// An optional file or directory to be renamed. The value must have the following format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties will overwrite the existing properties; otherwise, the existing properties will be preserved. This value must be a URL percent-encoded string. Note that the string may only contain ASCII characters in the ISO-8859-1 character set.
+ /// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ /// A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match.
+ /// Optional. User-defined properties to be stored with the filesystem, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded string. Note that the string may only contain ASCII characters in the ISO-8859-1 character set. If the filesystem exists, any properties not included in the list will be removed. All properties are removed if the header is omitted. To merge new and existing properties, first get all existing properties and the current E-Tag, then make a conditional request with the E-Tag and include values for all properties.
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file owner, the file owning group, and others. Each class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. When creating a file or directory and the parent folder does not have a default ACL, the umask restricts the permissions of the file or directory to be created. The resulting permission is given by p bitwise and not u, where p is the permission and u is the umask. For example, if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified in 4-digit octal notation (e.g. 0766).
+ /// Specify an ETag value to operate only on blobs with a matching value.
+ /// Specify an ETag value to operate only on blobs without a matching value.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// Specify an ETag value to operate only on blobs with a matching value.
+ /// Specify an ETag value to operate only on blobs without a matching value.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// The Path.CreateAsync Message.
+ internal static Azure.Core.HttpMessage CreateAsync_CreateMessage(
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string requestId = default,
+ int? timeout = default,
+ Azure.Storage.Files.DataLake.Models.PathResourceType? resource = default,
+ string continuation = default,
+ Azure.Storage.Files.DataLake.Models.PathRenameMode? mode = default,
+ string cacheControl = default,
+ string contentEncoding = default,
+ string contentLanguage = default,
+ string contentDisposition = default,
+ string contentType = default,
+ string renameSource = default,
+ string leaseId = default,
+ string sourceLeaseId = default,
+ string properties = default,
+ string permissions = default,
+ string umask = default,
+ Azure.ETag? ifMatch = default,
+ Azure.ETag? ifNoneMatch = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default,
+ Azure.ETag? sourceIfMatch = default,
+ Azure.ETag? sourceIfNoneMatch = default,
+ System.DateTimeOffset? sourceIfModifiedSince = default,
+ System.DateTimeOffset? sourceIfUnmodifiedSince = default)
+ {
+ // Validation
+ if (resourceUri == null)
+ {
+ throw new System.ArgumentNullException(nameof(resourceUri));
+ }
+
+ // Create the request
+ Azure.Core.HttpMessage _message = pipeline.CreateMessage();
+ Azure.Core.Request _request = _message.Request;
+
+ // Set the endpoint
+ _request.Method = Azure.Core.RequestMethod.Put;
+ _request.Uri.Reset(resourceUri);
+ if (timeout != null) { _request.Uri.AppendQuery("timeout", timeout.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+ if (resource != null) { _request.Uri.AppendQuery("resource", Azure.Storage.Files.DataLake.DataLakeRestClient.Serialization.ToString(resource.Value)); }
+ if (continuation != null) { _request.Uri.AppendQuery("continuation", continuation); }
+ if (mode != null) { _request.Uri.AppendQuery("mode", Azure.Storage.Files.DataLake.DataLakeRestClient.Serialization.ToString(mode.Value)); }
+
+ // Add request headers
+ // Service REST API version pinned by this generated client.
+ _request.Headers.SetValue("x-ms-version", "2019-02-02");
+ if (requestId != null) { _request.Headers.SetValue("x-ms-client-request-id", requestId); }
+ if (cacheControl != null) { _request.Headers.SetValue("x-ms-cache-control", cacheControl); }
+ if (contentEncoding != null) { _request.Headers.SetValue("x-ms-content-encoding", contentEncoding); }
+ if (contentLanguage != null) { _request.Headers.SetValue("x-ms-content-language", contentLanguage); }
+ if (contentDisposition != null) { _request.Headers.SetValue("x-ms-content-disposition", contentDisposition); }
+ if (contentType != null) { _request.Headers.SetValue("x-ms-content-type", contentType); }
+ if (renameSource != null) { _request.Headers.SetValue("x-ms-rename-source", renameSource); }
+ if (leaseId != null) { _request.Headers.SetValue("x-ms-lease-id", leaseId); }
+ if (sourceLeaseId != null) { _request.Headers.SetValue("x-ms-source-lease-id", sourceLeaseId); }
+ if (properties != null) { _request.Headers.SetValue("x-ms-properties", properties); }
+ if (permissions != null) { _request.Headers.SetValue("x-ms-permissions", permissions); }
+ if (umask != null) { _request.Headers.SetValue("x-ms-umask", umask); }
+ if (ifMatch != null) { _request.Headers.SetValue("If-Match", ifMatch.Value.ToString()); }
+ if (ifNoneMatch != null) { _request.Headers.SetValue("If-None-Match", ifNoneMatch.Value.ToString()); }
+ // Date headers use "R" (RFC1123) formatting, as required for HTTP conditional headers.
+ if (ifModifiedSince != null) { _request.Headers.SetValue("If-Modified-Since", ifModifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+ if (ifUnmodifiedSince != null) { _request.Headers.SetValue("If-Unmodified-Since", ifUnmodifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+ if (sourceIfMatch != null) { _request.Headers.SetValue("x-ms-source-if-match", sourceIfMatch.Value.ToString()); }
+ if (sourceIfNoneMatch != null) { _request.Headers.SetValue("x-ms-source-if-none-match", sourceIfNoneMatch.Value.ToString()); }
+ if (sourceIfModifiedSince != null) { _request.Headers.SetValue("x-ms-source-if-modified-since", sourceIfModifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+ if (sourceIfUnmodifiedSince != null) { _request.Headers.SetValue("x-ms-source-if-unmodified-since", sourceIfUnmodifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+
+ return _message;
+ }
+
+ ///
+ /// Create the Path.CreateAsync response or throw a failure exception.
+ ///
+ /// The raw Response.
+ /// The Path.CreateAsync Azure.Response{Azure.Storage.Files.DataLake.Models.PathCreateResult}.
+ internal static Azure.Response CreateAsync_CreateResponse(
+ Azure.Response response)
+ {
+ // Process the response
+ switch (response.Status)
+ {
+ case 201:
+ {
+ // 201 Created: the result is carried entirely in response headers (no body).
+ // Create the result
+ Azure.Storage.Files.DataLake.Models.PathCreateResult _value = new Azure.Storage.Files.DataLake.Models.PathCreateResult();
+
+ // Get response headers
+ string _header;
+ if (response.Headers.TryGetValue("ETag", out _header))
+ {
+ _value.ETag = new Azure.ETag(_header);
+ }
+ if (response.Headers.TryGetValue("Last-Modified", out _header))
+ {
+ _value.LastModified = System.DateTimeOffset.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("x-ms-continuation", out _header))
+ {
+ _value.Continuation = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Length", out _header))
+ {
+ _value.ContentLength = long.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ default:
+ {
+ // Any other status is a failure: read the error payload and throw.
+ // Create the result
+ string _value;
+ using (System.IO.StreamReader _streamReader = new System.IO.StreamReader(response.ContentStream))
+ {
+ _value = _streamReader.ReadToEnd();
+ }
+
+ throw _value.CreateException(response);
+ }
+ }
+ }
+ #endregion Path.CreateAsync
+
+ #region Path.UpdateAsync
+ ///
+ /// Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, sets properties for a file or directory, or sets access control for a file or directory. Data can only be appended to a file. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// The action must be "append" to upload data to be appended to a file, "flush" to flush previously uploaded data to a file, "setProperties" to set the properties of a file or directory, or "setAccessControl" to set the owner, group, permissions, or access control list for a file or directory. Note that Hierarchical Namespace must be enabled for the account in order to use access control. Also note that the Access Control List (ACL) includes permissions for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers are mutually exclusive.
+ /// Initial data
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// This parameter allows the caller to upload data in parallel and control the order in which it is appended to the file. It is required when uploading data to be appended to the file and when flushing previously uploaded data to the file. The value must be the position where the data is to be appended. Uploaded data is not immediately flushed, or written, to the file. To flush, the previously uploaded data must be contiguous, the position parameter must be specified and equal to the length of the file after all data has been written, and there must not be a request entity body included with the request.
+ /// Valid only for flush operations. If "true", uncommitted data is retained after the flush operation completes; otherwise, the uncommitted data is deleted after the flush operation. The default is false. Data at offsets less than the specified position are written to the file when flush succeeds, but this optional parameter allows data after the flush position to be retained for a future flush operation.
+ /// Azure Storage Events allow applications to receive notifications when files change. When Azure Storage Events are enabled, a file changed event is raised. This event has a property indicating whether this is the final change to distinguish the difference between an intermediate flush to a file stream and the final close of a file stream. The close query parameter is valid only when the action is "flush" and change notifications are enabled. If the value of close is "true" and the flush operation completes successfully, the service raises a file change notification with a property indicating that this is the final update (the file stream has been closed). If "false" a change notification is raised indicating the file has changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to indicate that the file stream has been closed."
+ /// Required for "Append Data" and "Flush Data". Must be 0 for "Flush Data". Must be the length of the request content in bytes for "Append Data".
+ /// Specify the transactional md5 for the body, to be validated by the service.
+ /// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ /// Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request.
+ /// Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request.
+ /// Optional. Sets the blob's Content-Disposition header.
+ /// Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
+ /// Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read request.
+ /// Optional. User-defined properties to be stored with the filesystem, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded string. Note that the string may only contain ASCII characters in the ISO-8859-1 character set. If the filesystem exists, any properties not included in the list will be removed. All properties are removed if the header is omitted. To merge new and existing properties, first get all existing properties and the current E-Tag, then make a conditional request with the E-Tag and include values for all properties.
+ /// Optional. The owner of the blob or directory.
+ /// Optional. The owning group of the blob or directory.
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file owner, the file owning group, and others. Each class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+ /// Sets POSIX access control rights on files and directories. The value is a comma-separated list of access control entries. Each access control entry (ACE) consists of a scope, a type, a user or group identifier, and permissions in the format "[scope:][type]:[id]:[permissions]".
+ /// Specify an ETag value to operate only on blobs with a matching value.
+ /// Specify an ETag value to operate only on blobs without a matching value.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// Whether to invoke the operation asynchronously. The default value is true.
+ /// The ClientDiagnostics instance used for operation reporting.
+ /// Operation name.
+ /// Cancellation token.
+ /// Azure.Response{Azure.Storage.Files.DataLake.Models.PathUpdateResult}
+ public static async System.Threading.Tasks.ValueTask> UpdateAsync(
+ Azure.Core.Pipeline.ClientDiagnostics clientDiagnostics,
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ Azure.Storage.Files.DataLake.Models.PathUpdateAction action,
+ System.IO.Stream body,
+ string requestId = default,
+ int? timeout = default,
+ long? position = default,
+ bool? retainUncommittedData = default,
+ bool? close = default,
+ long? contentLength = default,
+ byte[] contentHash = default,
+ string leaseId = default,
+ string cacheControl = default,
+ string contentType = default,
+ string contentDisposition = default,
+ string contentEncoding = default,
+ string contentLanguage = default,
+ string properties = default,
+ string owner = default,
+ string group = default,
+ string permissions = default,
+ string acl = default,
+ Azure.ETag? ifMatch = default,
+ Azure.ETag? ifNoneMatch = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default,
+ bool async = true,
+ string operationName = "Azure.Storage.Files.DataLake.PathClient.Update",
+ System.Threading.CancellationToken cancellationToken = default)
+ {
+ // Wrap the whole operation in a diagnostic scope so failures are reported with the operation name.
+ Azure.Core.Pipeline.DiagnosticScope _scope = clientDiagnostics.CreateScope(operationName);
+ try
+ {
+ _scope.AddAttribute("url", resourceUri);
+ _scope.Start();
+ using (Azure.Core.HttpMessage _message = UpdateAsync_CreateMessage(
+ pipeline,
+ resourceUri,
+ action,
+ body,
+ requestId,
+ timeout,
+ position,
+ retainUncommittedData,
+ close,
+ contentLength,
+ contentHash,
+ leaseId,
+ cacheControl,
+ contentType,
+ contentDisposition,
+ contentEncoding,
+ contentLanguage,
+ properties,
+ owner,
+ group,
+ permissions,
+ acl,
+ ifMatch,
+ ifNoneMatch,
+ ifModifiedSince,
+ ifUnmodifiedSince))
+ {
+ if (async)
+ {
+ // Send the request asynchronously if we're being called via an async path
+ await pipeline.SendAsync(_message, cancellationToken).ConfigureAwait(false);
+ }
+ else
+ {
+ // Send the request synchronously through the API that blocks if we're being called via a sync path
+ // (this is safe because the Task will complete before the user can call Wait)
+ pipeline.Send(_message, cancellationToken);
+ }
+ Azure.Response _response = _message.Response;
+ cancellationToken.ThrowIfCancellationRequested();
+ return UpdateAsync_CreateResponse(_response);
+ }
+ }
+ catch (System.Exception ex)
+ {
+ _scope.Failed(ex);
+ throw;
+ }
+ finally
+ {
+ _scope.Dispose();
+ }
+ }
+
+ ///
+ /// Create the Path.UpdateAsync request.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// The action must be "append" to upload data to be appended to a file, "flush" to flush previously uploaded data to a file, "setProperties" to set the properties of a file or directory, or "setAccessControl" to set the owner, group, permissions, or access control list for a file or directory. Note that Hierarchical Namespace must be enabled for the account in order to use access control. Also note that the Access Control List (ACL) includes permissions for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers are mutually exclusive.
+ /// Initial data
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// This parameter allows the caller to upload data in parallel and control the order in which it is appended to the file. It is required when uploading data to be appended to the file and when flushing previously uploaded data to the file. The value must be the position where the data is to be appended. Uploaded data is not immediately flushed, or written, to the file. To flush, the previously uploaded data must be contiguous, the position parameter must be specified and equal to the length of the file after all data has been written, and there must not be a request entity body included with the request.
+ /// Valid only for flush operations. If "true", uncommitted data is retained after the flush operation completes; otherwise, the uncommitted data is deleted after the flush operation. The default is false. Data at offsets less than the specified position are written to the file when flush succeeds, but this optional parameter allows data after the flush position to be retained for a future flush operation.
+ /// Azure Storage Events allow applications to receive notifications when files change. When Azure Storage Events are enabled, a file changed event is raised. This event has a property indicating whether this is the final change to distinguish the difference between an intermediate flush to a file stream and the final close of a file stream. The close query parameter is valid only when the action is "flush" and change notifications are enabled. If the value of close is "true" and the flush operation completes successfully, the service raises a file change notification with a property indicating that this is the final update (the file stream has been closed). If "false" a change notification is raised indicating the file has changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to indicate that the file stream has been closed.
+ /// Required for "Append Data" and "Flush Data". Must be 0 for "Flush Data". Must be the length of the request content in bytes for "Append Data".
+ /// Specify the transactional md5 for the body, to be validated by the service.
+ /// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ /// Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request.
+ /// Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request.
+ /// Optional. Sets the blob's Content-Disposition header.
+ /// Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
+ /// Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read request.
+ /// Optional. User-defined properties to be stored with the filesystem, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded string. Note that the string may only contain ASCII characters in the ISO-8859-1 character set. If the filesystem exists, any properties not included in the list will be removed. All properties are removed if the header is omitted. To merge new and existing properties, first get all existing properties and the current E-Tag, then make a conditional request with the E-Tag and include values for all properties.
+ /// Optional. The owner of the blob or directory.
+ /// Optional. The owning group of the blob or directory.
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file owner, the file owning group, and others. Each class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+ /// Sets POSIX access control rights on files and directories. The value is a comma-separated list of access control entries. Each access control entry (ACE) consists of a scope, a type, a user or group identifier, and permissions in the format "[scope:][type]:[id]:[permissions]".
+ /// Specify an ETag value to operate only on blobs with a matching value.
+ /// Specify an ETag value to operate only on blobs without a matching value.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// The Path.UpdateAsync Message.
+ internal static Azure.Core.HttpMessage UpdateAsync_CreateMessage(
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ Azure.Storage.Files.DataLake.Models.PathUpdateAction action,
+ System.IO.Stream body,
+ string requestId = default,
+ int? timeout = default,
+ long? position = default,
+ bool? retainUncommittedData = default,
+ bool? close = default,
+ long? contentLength = default,
+ byte[] contentHash = default,
+ string leaseId = default,
+ string cacheControl = default,
+ string contentType = default,
+ string contentDisposition = default,
+ string contentEncoding = default,
+ string contentLanguage = default,
+ string properties = default,
+ string owner = default,
+ string group = default,
+ string permissions = default,
+ string acl = default,
+ Azure.ETag? ifMatch = default,
+ Azure.ETag? ifNoneMatch = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default)
+ {
+ // Validation
+ if (resourceUri == null)
+ {
+ throw new System.ArgumentNullException(nameof(resourceUri));
+ }
+ if (body == null)
+ {
+ throw new System.ArgumentNullException(nameof(body));
+ }
+
+ // Create the request
+ Azure.Core.HttpMessage _message = pipeline.CreateMessage();
+ Azure.Core.Request _request = _message.Request;
+
+ // Set the endpoint
+ // A single PATCH endpoint multiplexes append/flush/setProperties/
+ // setAccessControl; the "action" query parameter selects the operation.
+ _request.Method = Azure.Core.RequestMethod.Patch;
+ _request.Uri.Reset(resourceUri);
+ _request.Uri.AppendQuery("action", Azure.Storage.Files.DataLake.DataLakeRestClient.Serialization.ToString(action));
+ if (timeout != null) { _request.Uri.AppendQuery("timeout", timeout.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+ if (position != null) { _request.Uri.AppendQuery("position", position.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+ // Boolean query values are lower-cased ("true"/"false") because .NET's
+ // bool.ToString() produces "True"/"False", which the service rejects.
+ if (retainUncommittedData != null) {
+ #pragma warning disable CA1308 // Normalize strings to uppercase
+ _request.Uri.AppendQuery("retainUncommittedData", retainUncommittedData.Value.ToString(System.Globalization.CultureInfo.InvariantCulture).ToLowerInvariant());
+ #pragma warning restore CA1308 // Normalize strings to uppercase
+ }
+ if (close != null) {
+ #pragma warning disable CA1308 // Normalize strings to uppercase
+ _request.Uri.AppendQuery("close", close.Value.ToString(System.Globalization.CultureInfo.InvariantCulture).ToLowerInvariant());
+ #pragma warning restore CA1308 // Normalize strings to uppercase
+ }
+
+ // Add request headers
+ _request.Headers.SetValue("x-ms-version", "2019-02-02");
+ if (requestId != null) { _request.Headers.SetValue("x-ms-client-request-id", requestId); }
+ if (contentLength != null) { _request.Headers.SetValue("Content-Length", contentLength.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+ if (contentHash != null) { _request.Headers.SetValue("x-ms-content-md5", System.Convert.ToBase64String(contentHash)); }
+ if (leaseId != null) { _request.Headers.SetValue("x-ms-lease-id", leaseId); }
+ if (cacheControl != null) { _request.Headers.SetValue("x-ms-cache-control", cacheControl); }
+ if (contentType != null) { _request.Headers.SetValue("x-ms-content-type", contentType); }
+ if (contentDisposition != null) { _request.Headers.SetValue("x-ms-content-disposition", contentDisposition); }
+ if (contentEncoding != null) { _request.Headers.SetValue("x-ms-content-encoding", contentEncoding); }
+ if (contentLanguage != null) { _request.Headers.SetValue("x-ms-content-language", contentLanguage); }
+ if (properties != null) { _request.Headers.SetValue("x-ms-properties", properties); }
+ if (owner != null) { _request.Headers.SetValue("x-ms-owner", owner); }
+ if (group != null) { _request.Headers.SetValue("x-ms-group", group); }
+ if (permissions != null) { _request.Headers.SetValue("x-ms-permissions", permissions); }
+ if (acl != null) { _request.Headers.SetValue("x-ms-acl", acl); }
+ if (ifMatch != null) { _request.Headers.SetValue("If-Match", ifMatch.Value.ToString()); }
+ if (ifNoneMatch != null) { _request.Headers.SetValue("If-None-Match", ifNoneMatch.Value.ToString()); }
+ if (ifModifiedSince != null) { _request.Headers.SetValue("If-Modified-Since", ifModifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+ if (ifUnmodifiedSince != null) { _request.Headers.SetValue("If-Unmodified-Since", ifUnmodifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+
+ // Create the body
+ // The caller-provided stream becomes the request content; it is required
+ // even for flush, where the payload is empty (Content-Length must be 0).
+ _request.Content = Azure.Core.RequestContent.Create(body);
+
+ return _message;
+ }
+
+ ///
+ /// Create the Path.UpdateAsync response or throw a failure exception.
+ ///
+ /// The raw Response.
+ /// The Path.UpdateAsync Azure.Response{Azure.Storage.Files.DataLake.Models.PathUpdateResult}.
+ internal static Azure.Response UpdateAsync_CreateResponse(
+ Azure.Response response)
+ {
+ // Process the response
+ switch (response.Status)
+ {
+ // 200: success; the service echoes the path's property headers back.
+ case 200:
+ {
+ // Create the result
+ Azure.Storage.Files.DataLake.Models.PathUpdateResult _value = new Azure.Storage.Files.DataLake.Models.PathUpdateResult();
+
+ // Get response headers
+ string _header;
+ if (response.Headers.TryGetValue("ETag", out _header))
+ {
+ _value.ETag = new Azure.ETag(_header);
+ }
+ if (response.Headers.TryGetValue("Last-Modified", out _header))
+ {
+ _value.LastModified = System.DateTimeOffset.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("Accept-Ranges", out _header))
+ {
+ _value.AcceptRanges = _header;
+ }
+ if (response.Headers.TryGetValue("Cache-Control", out _header))
+ {
+ _value.CacheControl = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Disposition", out _header))
+ {
+ _value.ContentDisposition = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Encoding", out _header))
+ {
+ _value.ContentEncoding = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Language", out _header))
+ {
+ _value.ContentLanguage = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Length", out _header))
+ {
+ _value.ContentLength = long.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("Content-Range", out _header))
+ {
+ _value.ContentRange = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Type", out _header))
+ {
+ _value.ContentType = _header;
+ }
+ if (response.Headers.TryGetValue("Content-MD5", out _header))
+ {
+ _value.ContentMD5 = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-properties", out _header))
+ {
+ _value.Properties = _header;
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ // 202: accepted; only the transactional MD5 header is surfaced
+ // (presumably the append-data path — confirm against the service docs).
+ case 202:
+ {
+ // Create the result
+ Azure.Storage.Files.DataLake.Models.PathUpdateResult _value = new Azure.Storage.Files.DataLake.Models.PathUpdateResult();
+
+ // Get response headers
+ string _header;
+ if (response.Headers.TryGetValue("Content-MD5", out _header))
+ {
+ _value.ContentMD5 = _header;
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ // Any other status: drain the body and convert it into an exception.
+ default:
+ {
+ // Create the result
+ string _value;
+ using (System.IO.StreamReader _streamReader = new System.IO.StreamReader(response.ContentStream))
+ {
+ _value = _streamReader.ReadToEnd();
+ }
+
+ throw _value.CreateException(response);
+ }
+ }
+ }
+ #endregion Path.UpdateAsync
+
+ #region Path.LeaseAsync
+ ///
+ /// Create and manage a lease to restrict write and delete access to the path. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// There are five lease actions: "acquire", "break", "change", "renew", and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration" to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the lease break period is allowed to elapse, during which time no lease operation except break and release can be performed on the file. When a lease is successfully broken, the response indicates the interval in seconds until a new lease can be acquired. Use "change" and specify the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// The lease duration is required to acquire a lease, and specifies the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or -1 for infinite lease.
+ /// The lease break period duration is optional to break a lease, and specifies the break period of the lease in seconds. The lease break duration must be between 0 and 60 seconds.
+ /// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ /// Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats.
+ /// Specify an ETag value to operate only on blobs with a matching value.
+ /// Specify an ETag value to operate only on blobs without a matching value.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// Whether to invoke the operation asynchronously. The default value is true.
+ /// The ClientDiagnostics instance used for operation reporting.
+ /// Operation name.
+ /// Cancellation token.
+ /// Azure.Response{Azure.Storage.Files.DataLake.Models.PathLeaseResult}
+ public static async System.Threading.Tasks.ValueTask> LeaseAsync(
+ Azure.Core.Pipeline.ClientDiagnostics clientDiagnostics,
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ Azure.Storage.Files.DataLake.Models.PathLeaseAction xMSLeaseAction,
+ string requestId = default,
+ int? timeout = default,
+ int? xMSLeaseDuration = default,
+ int? xMSLeaseBreakPeriod = default,
+ string leaseId = default,
+ string proposedLeaseId = default,
+ Azure.ETag? ifMatch = default,
+ Azure.ETag? ifNoneMatch = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default,
+ bool async = true,
+ string operationName = "Azure.Storage.Files.DataLake.PathClient.Lease",
+ System.Threading.CancellationToken cancellationToken = default)
+ {
+ // Open a diagnostic scope so the operation name and target URL are
+ // recorded and any failure is reported before being rethrown.
+ Azure.Core.Pipeline.DiagnosticScope _scope = clientDiagnostics.CreateScope(operationName);
+ try
+ {
+ _scope.AddAttribute("url", resourceUri);
+ _scope.Start();
+ using (Azure.Core.HttpMessage _message = LeaseAsync_CreateMessage(
+ pipeline,
+ resourceUri,
+ xMSLeaseAction,
+ requestId,
+ timeout,
+ xMSLeaseDuration,
+ xMSLeaseBreakPeriod,
+ leaseId,
+ proposedLeaseId,
+ ifMatch,
+ ifNoneMatch,
+ ifModifiedSince,
+ ifUnmodifiedSince))
+ {
+ if (async)
+ {
+ // Send the request asynchronously if we're being called via an async path
+ await pipeline.SendAsync(_message, cancellationToken).ConfigureAwait(false);
+ }
+ else
+ {
+ // Send the request synchronously through the API that blocks if we're being called via a sync path
+ // (this is safe because the Task will complete before the user can call Wait)
+ pipeline.Send(_message, cancellationToken);
+ }
+ Azure.Response _response = _message.Response;
+ cancellationToken.ThrowIfCancellationRequested();
+ return LeaseAsync_CreateResponse(_response);
+ }
+ }
+ catch (System.Exception ex)
+ {
+ _scope.Failed(ex);
+ throw;
+ }
+ finally
+ {
+ _scope.Dispose();
+ }
+ }
+
+ ///
+ /// Create the Path.LeaseAsync request.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// There are five lease actions: "acquire", "break", "change", "renew", and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration" to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the lease break period is allowed to elapse, during which time no lease operation except break and release can be performed on the file. When a lease is successfully broken, the response indicates the interval in seconds until a new lease can be acquired. Use "change" and specify the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// The lease duration is required to acquire a lease, and specifies the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or -1 for infinite lease.
+ /// The lease break period duration is optional to break a lease, and specifies the break period of the lease in seconds. The lease break duration must be between 0 and 60 seconds.
+ /// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ /// Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID string formats.
+ /// Specify an ETag value to operate only on blobs with a matching value.
+ /// Specify an ETag value to operate only on blobs without a matching value.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// The Path.LeaseAsync Message.
+ internal static Azure.Core.HttpMessage LeaseAsync_CreateMessage(
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ Azure.Storage.Files.DataLake.Models.PathLeaseAction xMSLeaseAction,
+ string requestId = default,
+ int? timeout = default,
+ int? xMSLeaseDuration = default,
+ int? xMSLeaseBreakPeriod = default,
+ string leaseId = default,
+ string proposedLeaseId = default,
+ Azure.ETag? ifMatch = default,
+ Azure.ETag? ifNoneMatch = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default)
+ {
+ // Validation
+ if (resourceUri == null)
+ {
+ throw new System.ArgumentNullException(nameof(resourceUri));
+ }
+
+ // Create the request
+ Azure.Core.HttpMessage _message = pipeline.CreateMessage();
+ Azure.Core.Request _request = _message.Request;
+
+ // Set the endpoint
+ // Lease operations POST to the path itself; the specific verb
+ // (acquire/break/change/renew/release) travels in x-ms-lease-action.
+ _request.Method = Azure.Core.RequestMethod.Post;
+ _request.Uri.Reset(resourceUri);
+ if (timeout != null) { _request.Uri.AppendQuery("timeout", timeout.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+
+ // Add request headers
+ _request.Headers.SetValue("x-ms-version", "2019-02-02");
+ _request.Headers.SetValue("x-ms-lease-action", Azure.Storage.Files.DataLake.DataLakeRestClient.Serialization.ToString(xMSLeaseAction));
+ if (requestId != null) { _request.Headers.SetValue("x-ms-client-request-id", requestId); }
+ if (xMSLeaseDuration != null) { _request.Headers.SetValue("x-ms-lease-duration", xMSLeaseDuration.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+ if (xMSLeaseBreakPeriod != null) { _request.Headers.SetValue("x-ms-lease-break-period", xMSLeaseBreakPeriod.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+ if (leaseId != null) { _request.Headers.SetValue("x-ms-lease-id", leaseId); }
+ if (proposedLeaseId != null) { _request.Headers.SetValue("x-ms-proposed-lease-id", proposedLeaseId); }
+ if (ifMatch != null) { _request.Headers.SetValue("If-Match", ifMatch.Value.ToString()); }
+ if (ifNoneMatch != null) { _request.Headers.SetValue("If-None-Match", ifNoneMatch.Value.ToString()); }
+ if (ifModifiedSince != null) { _request.Headers.SetValue("If-Modified-Since", ifModifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+ if (ifUnmodifiedSince != null) { _request.Headers.SetValue("If-Unmodified-Since", ifUnmodifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+
+ return _message;
+ }
+
+ ///
+ /// Create the Path.LeaseAsync response or throw a failure exception.
+ ///
+ /// The raw Response.
+ /// The Path.LeaseAsync Azure.Response{Azure.Storage.Files.DataLake.Models.PathLeaseResult}.
+ internal static Azure.Response LeaseAsync_CreateResponse(
+ Azure.Response response)
+ {
+ // Process the response
+ switch (response.Status)
+ {
+ // 200: success; the active lease id is returned
+ // (NOTE(review): presumably renew/change/release — confirm against service docs).
+ case 200:
+ {
+ // Create the result
+ Azure.Storage.Files.DataLake.Models.PathLeaseResult _value = new Azure.Storage.Files.DataLake.Models.PathLeaseResult();
+
+ // Get response headers
+ string _header;
+ if (response.Headers.TryGetValue("ETag", out _header))
+ {
+ _value.ETag = new Azure.ETag(_header);
+ }
+ if (response.Headers.TryGetValue("Last-Modified", out _header))
+ {
+ _value.LastModified = System.DateTimeOffset.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("x-ms-lease-id", out _header))
+ {
+ _value.LeaseId = _header;
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ // 201: created; a new lease was acquired and its id returned.
+ case 201:
+ {
+ // Create the result
+ Azure.Storage.Files.DataLake.Models.PathLeaseResult _value = new Azure.Storage.Files.DataLake.Models.PathLeaseResult();
+
+ // Get response headers
+ string _header;
+ if (response.Headers.TryGetValue("ETag", out _header))
+ {
+ _value.ETag = new Azure.ETag(_header);
+ }
+ if (response.Headers.TryGetValue("Last-Modified", out _header))
+ {
+ _value.LastModified = System.DateTimeOffset.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("x-ms-lease-id", out _header))
+ {
+ _value.LeaseId = _header;
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ // 202: accepted; x-ms-lease-time is returned instead of a lease id
+ // (NOTE(review): presumably the break path's remaining time — confirm).
+ case 202:
+ {
+ // Create the result
+ Azure.Storage.Files.DataLake.Models.PathLeaseResult _value = new Azure.Storage.Files.DataLake.Models.PathLeaseResult();
+
+ // Get response headers
+ string _header;
+ if (response.Headers.TryGetValue("ETag", out _header))
+ {
+ _value.ETag = new Azure.ETag(_header);
+ }
+ if (response.Headers.TryGetValue("Last-Modified", out _header))
+ {
+ _value.LastModified = System.DateTimeOffset.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("x-ms-lease-time", out _header))
+ {
+ _value.LeaseTime = _header;
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ // Any other status: drain the body and convert it into an exception.
+ default:
+ {
+ // Create the result
+ string _value;
+ using (System.IO.StreamReader _streamReader = new System.IO.StreamReader(response.ContentStream))
+ {
+ _value = _streamReader.ReadToEnd();
+ }
+
+ throw _value.CreateException(response);
+ }
+ }
+ }
+ #endregion Path.LeaseAsync
+
+ #region Path.ReadAsync
+ ///
+ /// Read the contents of a file. For read operations, range requests are supported. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// The HTTP Range request header specifies one or more byte ranges of the resource to be retrieved.
+ /// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ /// Optional. When this header is set to "true" and specified together with the Range header, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4MB in size. If this header is specified without the Range header, the service returns status code 400 (Bad Request). If this header is set to true when the range exceeds 4 MB in size, the service returns status code 400 (Bad Request).
+ /// Specify an ETag value to operate only on blobs with a matching value.
+ /// Specify an ETag value to operate only on blobs without a matching value.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// Whether to invoke the operation asynchronously. The default value is true.
+ /// The ClientDiagnostics instance used for operation reporting.
+ /// Operation name.
+ /// Cancellation token.
+ /// Azure.Response{Azure.Storage.Files.DataLake.Models.PathReadResult}
+ public static async System.Threading.Tasks.ValueTask> ReadAsync(
+ Azure.Core.Pipeline.ClientDiagnostics clientDiagnostics,
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string requestId = default,
+ int? timeout = default,
+ string range = default,
+ string leaseId = default,
+ bool? xMSRangeGetContentMd5 = default,
+ Azure.ETag? ifMatch = default,
+ Azure.ETag? ifNoneMatch = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default,
+ bool async = true,
+ string operationName = "Azure.Storage.Files.DataLake.PathClient.Read",
+ System.Threading.CancellationToken cancellationToken = default)
+ {
+ // Open a diagnostic scope so the operation name and target URL are
+ // recorded and any failure is reported before being rethrown.
+ Azure.Core.Pipeline.DiagnosticScope _scope = clientDiagnostics.CreateScope(operationName);
+ try
+ {
+ _scope.AddAttribute("url", resourceUri);
+ _scope.Start();
+ using (Azure.Core.HttpMessage _message = ReadAsync_CreateMessage(
+ pipeline,
+ resourceUri,
+ requestId,
+ timeout,
+ range,
+ leaseId,
+ xMSRangeGetContentMd5,
+ ifMatch,
+ ifNoneMatch,
+ ifModifiedSince,
+ ifUnmodifiedSince))
+ {
+ if (async)
+ {
+ // Send the request asynchronously if we're being called via an async path
+ await pipeline.SendAsync(_message, cancellationToken).ConfigureAwait(false);
+ }
+ else
+ {
+ // Send the request synchronously through the API that blocks if we're being called via a sync path
+ // (this is safe because the Task will complete before the user can call Wait)
+ pipeline.Send(_message, cancellationToken);
+ }
+ Azure.Response _response = _message.Response;
+ cancellationToken.ThrowIfCancellationRequested();
+ return ReadAsync_CreateResponse(_response);
+ }
+ }
+ catch (System.Exception ex)
+ {
+ _scope.Failed(ex);
+ throw;
+ }
+ finally
+ {
+ _scope.Dispose();
+ }
+ }
+
+ ///
+ /// Create the Path.ReadAsync request.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the target of the desired operation.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// The HTTP Range request header specifies one or more byte ranges of the resource to be retrieved.
+ /// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ /// Optional. When this header is set to "true" and specified together with the Range header, the service returns the MD5 hash for the range, as long as the range is less than or equal to 4MB in size. If this header is specified without the Range header, the service returns status code 400 (Bad Request). If this header is set to true when the range exceeds 4 MB in size, the service returns status code 400 (Bad Request).
+ /// Specify an ETag value to operate only on blobs with a matching value.
+ /// Specify an ETag value to operate only on blobs without a matching value.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// The Path.ReadAsync Message.
+ internal static Azure.Core.HttpMessage ReadAsync_CreateMessage(
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string requestId = default,
+ int? timeout = default,
+ string range = default,
+ string leaseId = default,
+ bool? xMSRangeGetContentMd5 = default,
+ Azure.ETag? ifMatch = default,
+ Azure.ETag? ifNoneMatch = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default)
+ {
+ // Validation
+ if (resourceUri == null)
+ {
+ throw new System.ArgumentNullException(nameof(resourceUri));
+ }
+
+ // Create the request
+ Azure.Core.HttpMessage _message = pipeline.CreateMessage();
+ Azure.Core.Request _request = _message.Request;
+
+ // Set the endpoint
+ // Plain GET on the path; an optional Range header below selects bytes.
+ _request.Method = Azure.Core.RequestMethod.Get;
+ _request.Uri.Reset(resourceUri);
+ if (timeout != null) { _request.Uri.AppendQuery("timeout", timeout.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+
+ // Add request headers
+ _request.Headers.SetValue("x-ms-version", "2019-02-02");
+ if (requestId != null) { _request.Headers.SetValue("x-ms-client-request-id", requestId); }
+ if (range != null) { _request.Headers.SetValue("Range", range); }
+ if (leaseId != null) { _request.Headers.SetValue("x-ms-lease-id", leaseId); }
+ // The service requires lower-case "true"/"false"; bool.ToString()
+ // produces "True"/"False", hence the ToLowerInvariant.
+ if (xMSRangeGetContentMd5 != null) {
+ #pragma warning disable CA1308 // Normalize strings to uppercase
+ _request.Headers.SetValue("x-ms-range-get-content-md5", xMSRangeGetContentMd5.Value.ToString(System.Globalization.CultureInfo.InvariantCulture).ToLowerInvariant());
+ #pragma warning restore CA1308 // Normalize strings to uppercase
+ }
+ if (ifMatch != null) { _request.Headers.SetValue("If-Match", ifMatch.Value.ToString()); }
+ if (ifNoneMatch != null) { _request.Headers.SetValue("If-None-Match", ifNoneMatch.Value.ToString()); }
+ if (ifModifiedSince != null) { _request.Headers.SetValue("If-Modified-Since", ifModifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+ if (ifUnmodifiedSince != null) { _request.Headers.SetValue("If-Unmodified-Since", ifUnmodifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+
+ return _message;
+ }
+
+ ///
+ /// Create the Path.ReadAsync response or throw a failure exception.
+ ///
+ /// The raw Response.
+ /// The Path.ReadAsync Azure.Response{Azure.Storage.Files.DataLake.Models.PathReadResult}.
+ internal static Azure.Response ReadAsync_CreateResponse(
+ Azure.Response response)
+ {
+ // Process the response
+ switch (response.Status)
+ {
+ case 200:
+ {
+ // Create the result
+ Azure.Storage.Files.DataLake.Models.PathReadResult _value = new Azure.Storage.Files.DataLake.Models.PathReadResult();
+ _value.Body = response.ContentStream; // You should manually wrap with RetriableStream!
+
+ // Get response headers
+ string _header;
+ if (response.Headers.TryGetValue("Accept-Ranges", out _header))
+ {
+ _value.AcceptRanges = _header;
+ }
+ if (response.Headers.TryGetValue("Cache-Control", out _header))
+ {
+ _value.CacheControl = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Disposition", out _header))
+ {
+ _value.ContentDisposition = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Encoding", out _header))
+ {
+ _value.ContentEncoding = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Language", out _header))
+ {
+ _value.ContentLanguage = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Length", out _header))
+ {
+ _value.ContentLength = long.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("Content-Range", out _header))
+ {
+ _value.ContentRange = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Type", out _header))
+ {
+ _value.ContentType = _header;
+ }
+ if (response.Headers.TryGetValue("Content-MD5", out _header))
+ {
+ _value.ContentMD5 = _header;
+ }
+ if (response.Headers.TryGetValue("ETag", out _header))
+ {
+ _value.ETag = new Azure.ETag(_header);
+ }
+ if (response.Headers.TryGetValue("Last-Modified", out _header))
+ {
+ _value.LastModified = System.DateTimeOffset.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("x-ms-resource-type", out _header))
+ {
+ _value.ResourceType = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-properties", out _header))
+ {
+ _value.Properties = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-lease-duration", out _header))
+ {
+ _value.LeaseDuration = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-lease-state", out _header))
+ {
+ _value.LeaseState = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-lease-status", out _header))
+ {
+ _value.LeaseStatus = _header;
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ case 206:
+ {
+ // Create the result
+ Azure.Storage.Files.DataLake.Models.PathReadResult _value = new Azure.Storage.Files.DataLake.Models.PathReadResult();
+ _value.Body = response.ContentStream; // You should manually wrap with RetriableStream!
+
+ // Get response headers
+ string _header;
+ if (response.Headers.TryGetValue("Accept-Ranges", out _header))
+ {
+ _value.AcceptRanges = _header;
+ }
+ if (response.Headers.TryGetValue("Cache-Control", out _header))
+ {
+ _value.CacheControl = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Disposition", out _header))
+ {
+ _value.ContentDisposition = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Encoding", out _header))
+ {
+ _value.ContentEncoding = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Language", out _header))
+ {
+ _value.ContentLanguage = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Length", out _header))
+ {
+ _value.ContentLength = long.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("Content-Range", out _header))
+ {
+ _value.ContentRange = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Type", out _header))
+ {
+ _value.ContentType = _header;
+ }
+ if (response.Headers.TryGetValue("Content-MD5", out _header))
+ {
+ _value.ContentMD5 = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-content-md5", out _header))
+ {
+ _value.XMSContentMd5 = _header;
+ }
+ if (response.Headers.TryGetValue("ETag", out _header))
+ {
+ _value.ETag = new Azure.ETag(_header);
+ }
+ if (response.Headers.TryGetValue("Last-Modified", out _header))
+ {
+ _value.LastModified = System.DateTimeOffset.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("x-ms-resource-type", out _header))
+ {
+ _value.ResourceType = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-properties", out _header))
+ {
+ _value.Properties = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-lease-duration", out _header))
+ {
+ _value.LeaseDuration = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-lease-state", out _header))
+ {
+ _value.LeaseState = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-lease-status", out _header))
+ {
+ _value.LeaseStatus = _header;
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ case 304:
+ {
+ return new Azure.NoBodyResponse(response);
+ }
+ default:
+ {
+ // Create the result
+ string _value;
+ using (System.IO.StreamReader _streamReader = new System.IO.StreamReader(response.ContentStream))
+ {
+ _value = _streamReader.ReadToEnd();
+ }
+
+ throw _value.CreateException(response);
+ }
+ }
+ }
+ #endregion Path.ReadAsync
+
+ #region Path.GetPropertiesAsync
+ /// <summary>
+ /// Get Properties returns all system and user defined properties for a path. Get Status returns all system defined properties for a path. Get Access Control List returns the access control list for a path. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+ /// </summary>
+ /// <param name="clientDiagnostics">The ClientDiagnostics instance used for operation reporting.</param>
+ /// <param name="pipeline">The pipeline used for sending requests.</param>
+ /// <param name="resourceUri">The URL of the service account, container, or blob that is the target of the desired operation.</param>
+ /// <param name="requestId">Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.</param>
+ /// <param name="timeout">The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.</param>
+ /// <param name="action">Optional. If the value is "getStatus" only the system defined properties for the path are returned. If the value is "getAccessControl" the access control list is returned in the response headers (Hierarchical Namespace must be enabled for the account), otherwise the properties are returned.</param>
+ /// <param name="upn">Optional. Valid only when Hierarchical Namespace is enabled for the account. If "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If "false", the values will be returned as Azure Active Directory Object IDs. The default value is false. Note that group and application Object IDs are not translated because they do not have unique friendly names.</param>
+ /// <param name="leaseId">If specified, the operation only succeeds if the resource's lease is active and matches this ID.</param>
+ /// <param name="ifMatch">Specify an ETag value to operate only on blobs with a matching value.</param>
+ /// <param name="ifNoneMatch">Specify an ETag value to operate only on blobs without a matching value.</param>
+ /// <param name="ifModifiedSince">Specify this header value to operate only on a blob if it has been modified since the specified date/time.</param>
+ /// <param name="ifUnmodifiedSince">Specify this header value to operate only on a blob if it has not been modified since the specified date/time.</param>
+ /// <param name="async">Whether to invoke the operation asynchronously. The default value is true.</param>
+ /// <param name="operationName">Operation name.</param>
+ /// <param name="cancellationToken">Cancellation token.</param>
+ /// <returns>Azure.Response{Azure.Storage.Files.DataLake.Models.PathGetPropertiesResult}</returns>
+ public static async System.Threading.Tasks.ValueTask> GetPropertiesAsync(
+ Azure.Core.Pipeline.ClientDiagnostics clientDiagnostics,
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string requestId = default,
+ int? timeout = default,
+ Azure.Storage.Files.DataLake.Models.PathGetPropertiesAction? action = default,
+ bool? upn = default,
+ string leaseId = default,
+ Azure.ETag? ifMatch = default,
+ Azure.ETag? ifNoneMatch = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default,
+ bool async = true,
+ string operationName = "Azure.Storage.Files.DataLake.PathClient.GetProperties",
+ System.Threading.CancellationToken cancellationToken = default)
+ {
+ Azure.Core.Pipeline.DiagnosticScope _scope = clientDiagnostics.CreateScope(operationName);
+ try
+ {
+ _scope.AddAttribute("url", resourceUri);
+ _scope.Start();
+ using (Azure.Core.HttpMessage _message = GetPropertiesAsync_CreateMessage(
+ pipeline,
+ resourceUri,
+ requestId,
+ timeout,
+ action,
+ upn,
+ leaseId,
+ ifMatch,
+ ifNoneMatch,
+ ifModifiedSince,
+ ifUnmodifiedSince))
+ {
+ if (async)
+ {
+ // Send the request asynchronously if we're being called via an async path
+ await pipeline.SendAsync(_message, cancellationToken).ConfigureAwait(false);
+ }
+ else
+ {
+ // Send the request synchronously through the API that blocks if we're being called via a sync path
+ // (this is safe because the Task will complete before the user can call Wait)
+ pipeline.Send(_message, cancellationToken);
+ }
+ Azure.Response _response = _message.Response;
+ cancellationToken.ThrowIfCancellationRequested();
+ return GetPropertiesAsync_CreateResponse(_response);
+ }
+ }
+ catch (System.Exception ex)
+ {
+ _scope.Failed(ex);
+ throw;
+ }
+ finally
+ {
+ _scope.Dispose();
+ }
+ }
+
+ ///
+ /// Create the Path.GetPropertiesAsync request.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the targe of the desired operation.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// Optional. If the value is "getStatus" only the system defined properties for the path are returned. If the value is "getAccessControl" the access control list is returned in the response headers (Hierarchical Namespace must be enabled for the account), otherwise the properties are returned.
+ /// Optional. Valid only when Hierarchical Namespace is enabled for the account. If "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If "false", the values will be returned as Azure Active Directory Object IDs. The default value is false. Note that group and application Object IDs are not translated because they do not have unique friendly names.
+ /// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ /// Specify an ETag value to operate only on blobs with a matching value.
+ /// Specify an ETag value to operate only on blobs without a matching value.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// The Path.GetPropertiesAsync Message.
+ internal static Azure.Core.HttpMessage GetPropertiesAsync_CreateMessage(
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string requestId = default,
+ int? timeout = default,
+ Azure.Storage.Files.DataLake.Models.PathGetPropertiesAction? action = default,
+ bool? upn = default,
+ string leaseId = default,
+ Azure.ETag? ifMatch = default,
+ Azure.ETag? ifNoneMatch = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default)
+ {
+ // Validation
+ if (resourceUri == null)
+ {
+ throw new System.ArgumentNullException(nameof(resourceUri));
+ }
+
+ // Create the request
+ Azure.Core.HttpMessage _message = pipeline.CreateMessage();
+ Azure.Core.Request _request = _message.Request;
+
+ // Set the endpoint
+ _request.Method = Azure.Core.RequestMethod.Head;
+ _request.Uri.Reset(resourceUri);
+ if (timeout != null) { _request.Uri.AppendQuery("timeout", timeout.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+ if (action != null) { _request.Uri.AppendQuery("action", Azure.Storage.Files.DataLake.DataLakeRestClient.Serialization.ToString(action.Value)); }
+ if (upn != null) {
+ #pragma warning disable CA1308 // Normalize strings to uppercase
+ _request.Uri.AppendQuery("upn", upn.Value.ToString(System.Globalization.CultureInfo.InvariantCulture).ToLowerInvariant());
+ #pragma warning restore CA1308 // Normalize strings to uppercase
+ }
+
+ // Add request headers
+ _request.Headers.SetValue("x-ms-version", "2019-02-02");
+ if (requestId != null) { _request.Headers.SetValue("x-ms-client-request-id", requestId); }
+ if (leaseId != null) { _request.Headers.SetValue("x-ms-lease-id", leaseId); }
+ if (ifMatch != null) { _request.Headers.SetValue("If-Match", ifMatch.Value.ToString()); }
+ if (ifNoneMatch != null) { _request.Headers.SetValue("If-None-Match", ifNoneMatch.Value.ToString()); }
+ if (ifModifiedSince != null) { _request.Headers.SetValue("If-Modified-Since", ifModifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+ if (ifUnmodifiedSince != null) { _request.Headers.SetValue("If-Unmodified-Since", ifUnmodifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+
+ return _message;
+ }
+
+ ///
+ /// Create the Path.GetPropertiesAsync response or throw a failure exception.
+ ///
+ /// The raw Response.
+ /// The Path.GetPropertiesAsync Azure.Response{Azure.Storage.Files.DataLake.Models.PathGetPropertiesResult}.
+ internal static Azure.Response GetPropertiesAsync_CreateResponse(
+ Azure.Response response)
+ {
+ // Process the response
+ switch (response.Status)
+ {
+ case 200:
+ {
+ // Create the result
+ Azure.Storage.Files.DataLake.Models.PathGetPropertiesResult _value = new Azure.Storage.Files.DataLake.Models.PathGetPropertiesResult();
+
+ // Get response headers
+ string _header;
+ if (response.Headers.TryGetValue("Accept-Ranges", out _header))
+ {
+ _value.AcceptRanges = _header;
+ }
+ if (response.Headers.TryGetValue("Cache-Control", out _header))
+ {
+ _value.CacheControl = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Disposition", out _header))
+ {
+ _value.ContentDisposition = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Encoding", out _header))
+ {
+ _value.ContentEncoding = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Language", out _header))
+ {
+ _value.ContentLanguage = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Length", out _header))
+ {
+ _value.ContentLength = long.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("Content-Range", out _header))
+ {
+ _value.ContentRange = _header;
+ }
+ if (response.Headers.TryGetValue("Content-Type", out _header))
+ {
+ _value.ContentType = _header;
+ }
+ if (response.Headers.TryGetValue("Content-MD5", out _header))
+ {
+ _value.ContentMD5 = _header;
+ }
+ if (response.Headers.TryGetValue("ETag", out _header))
+ {
+ _value.ETag = new Azure.ETag(_header);
+ }
+ if (response.Headers.TryGetValue("Last-Modified", out _header))
+ {
+ _value.LastModified = System.DateTimeOffset.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("x-ms-resource-type", out _header))
+ {
+ _value.ResourceType = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-properties", out _header))
+ {
+ _value.Properties = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-owner", out _header))
+ {
+ _value.Owner = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-group", out _header))
+ {
+ _value.Group = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-permissions", out _header))
+ {
+ _value.Permissions = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-acl", out _header))
+ {
+ _value.ACL = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-lease-duration", out _header))
+ {
+ _value.LeaseDuration = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-lease-state", out _header))
+ {
+ _value.LeaseState = _header;
+ }
+ if (response.Headers.TryGetValue("x-ms-lease-status", out _header))
+ {
+ _value.LeaseStatus = _header;
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ case 304:
+ {
+ return new Azure.NoBodyResponse(response);
+ }
+ default:
+ {
+ // Create the result
+ string _value;
+ using (System.IO.StreamReader _streamReader = new System.IO.StreamReader(response.ContentStream))
+ {
+ _value = _streamReader.ReadToEnd();
+ }
+
+ throw _value.CreateException(response);
+ }
+ }
+ }
+ #endregion Path.GetPropertiesAsync
+
+ #region Path.DeleteAsync
+ /// <summary>
+ /// Delete the file or directory. This operation supports conditional HTTP requests. For more information, see [Specifying Conditional Headers for Blob Service Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+ /// </summary>
+ /// <param name="clientDiagnostics">The ClientDiagnostics instance used for operation reporting.</param>
+ /// <param name="pipeline">The pipeline used for sending requests.</param>
+ /// <param name="resourceUri">The URL of the service account, container, or blob that is the target of the desired operation.</param>
+ /// <param name="requestId">Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.</param>
+ /// <param name="timeout">The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.</param>
+ /// <param name="recursive">Required</param>
+ /// <param name="continuation">Optional. When deleting a directory, the number of paths that are deleted with each invocation is limited. If the number of paths to be deleted exceeds this limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the delete operation to continue deleting the directory.</param>
+ /// <param name="leaseId">If specified, the operation only succeeds if the resource's lease is active and matches this ID.</param>
+ /// <param name="ifMatch">Specify an ETag value to operate only on blobs with a matching value.</param>
+ /// <param name="ifNoneMatch">Specify an ETag value to operate only on blobs without a matching value.</param>
+ /// <param name="ifModifiedSince">Specify this header value to operate only on a blob if it has been modified since the specified date/time.</param>
+ /// <param name="ifUnmodifiedSince">Specify this header value to operate only on a blob if it has not been modified since the specified date/time.</param>
+ /// <param name="async">Whether to invoke the operation asynchronously. The default value is true.</param>
+ /// <param name="operationName">Operation name.</param>
+ /// <param name="cancellationToken">Cancellation token.</param>
+ /// <returns>Azure.Response{Azure.Storage.Files.DataLake.Models.PathDeleteResult}</returns>
+ public static async System.Threading.Tasks.ValueTask> DeleteAsync(
+ Azure.Core.Pipeline.ClientDiagnostics clientDiagnostics,
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string requestId = default,
+ int? timeout = default,
+ bool? recursive = default,
+ string continuation = default,
+ string leaseId = default,
+ Azure.ETag? ifMatch = default,
+ Azure.ETag? ifNoneMatch = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default,
+ bool async = true,
+ string operationName = "Azure.Storage.Files.DataLake.PathClient.Delete",
+ System.Threading.CancellationToken cancellationToken = default)
+ {
+ Azure.Core.Pipeline.DiagnosticScope _scope = clientDiagnostics.CreateScope(operationName);
+ try
+ {
+ _scope.AddAttribute("url", resourceUri);
+ _scope.Start();
+ using (Azure.Core.HttpMessage _message = DeleteAsync_CreateMessage(
+ pipeline,
+ resourceUri,
+ requestId,
+ timeout,
+ recursive,
+ continuation,
+ leaseId,
+ ifMatch,
+ ifNoneMatch,
+ ifModifiedSince,
+ ifUnmodifiedSince))
+ {
+ if (async)
+ {
+ // Send the request asynchronously if we're being called via an async path
+ await pipeline.SendAsync(_message, cancellationToken).ConfigureAwait(false);
+ }
+ else
+ {
+ // Send the request synchronously through the API that blocks if we're being called via a sync path
+ // (this is safe because the Task will complete before the user can call Wait)
+ pipeline.Send(_message, cancellationToken);
+ }
+ Azure.Response _response = _message.Response;
+ cancellationToken.ThrowIfCancellationRequested();
+ return DeleteAsync_CreateResponse(_response);
+ }
+ }
+ catch (System.Exception ex)
+ {
+ _scope.Failed(ex);
+ throw;
+ }
+ finally
+ {
+ _scope.Dispose();
+ }
+ }
+
+ ///
+ /// Create the Path.DeleteAsync request.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the targe of the desired operation.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// Required
+ /// Optional. When deleting a directory, the number of paths that are deleted with each invocation is limited. If the number of paths to be deleted exceeds this limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the delete operation to continue deleting the directory.
+ /// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ /// Specify an ETag value to operate only on blobs with a matching value.
+ /// Specify an ETag value to operate only on blobs without a matching value.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// The Path.DeleteAsync Message.
+ internal static Azure.Core.HttpMessage DeleteAsync_CreateMessage(
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ string requestId = default,
+ int? timeout = default,
+ bool? recursive = default,
+ string continuation = default,
+ string leaseId = default,
+ Azure.ETag? ifMatch = default,
+ Azure.ETag? ifNoneMatch = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default)
+ {
+ // Validation
+ if (resourceUri == null)
+ {
+ throw new System.ArgumentNullException(nameof(resourceUri));
+ }
+
+ // Create the request
+ Azure.Core.HttpMessage _message = pipeline.CreateMessage();
+ Azure.Core.Request _request = _message.Request;
+
+ // Set the endpoint
+ _request.Method = Azure.Core.RequestMethod.Delete;
+ _request.Uri.Reset(resourceUri);
+ if (timeout != null) { _request.Uri.AppendQuery("timeout", timeout.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+ if (recursive != null) {
+ #pragma warning disable CA1308 // Normalize strings to uppercase
+ _request.Uri.AppendQuery("recursive", recursive.Value.ToString(System.Globalization.CultureInfo.InvariantCulture).ToLowerInvariant());
+ #pragma warning restore CA1308 // Normalize strings to uppercase
+ }
+ if (continuation != null) { _request.Uri.AppendQuery("continuation", continuation); }
+
+ // Add request headers
+ _request.Headers.SetValue("x-ms-version", "2019-02-02");
+ if (requestId != null) { _request.Headers.SetValue("x-ms-client-request-id", requestId); }
+ if (leaseId != null) { _request.Headers.SetValue("x-ms-lease-id", leaseId); }
+ if (ifMatch != null) { _request.Headers.SetValue("If-Match", ifMatch.Value.ToString()); }
+ if (ifNoneMatch != null) { _request.Headers.SetValue("If-None-Match", ifNoneMatch.Value.ToString()); }
+ if (ifModifiedSince != null) { _request.Headers.SetValue("If-Modified-Since", ifModifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+ if (ifUnmodifiedSince != null) { _request.Headers.SetValue("If-Unmodified-Since", ifUnmodifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+
+ return _message;
+ }
+
+ ///
+ /// Create the Path.DeleteAsync response or throw a failure exception.
+ ///
+ /// The raw Response.
+ /// The Path.DeleteAsync Azure.Response{Azure.Storage.Files.DataLake.Models.PathDeleteResult}.
+ internal static Azure.Response DeleteAsync_CreateResponse(
+ Azure.Response response)
+ {
+ // Process the response
+ switch (response.Status)
+ {
+ case 200:
+ {
+ // Create the result
+ Azure.Storage.Files.DataLake.Models.PathDeleteResult _value = new Azure.Storage.Files.DataLake.Models.PathDeleteResult();
+
+ // Get response headers
+ string _header;
+ if (response.Headers.TryGetValue("x-ms-continuation", out _header))
+ {
+ _value.Continuation = _header;
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ default:
+ {
+ // Create the result
+ string _value;
+ using (System.IO.StreamReader _streamReader = new System.IO.StreamReader(response.ContentStream))
+ {
+ _value = _streamReader.ReadToEnd();
+ }
+
+ throw _value.CreateException(response);
+ }
+ }
+ }
+ #endregion Path.DeleteAsync
+
+ #region Path.SetAccessControlAsync
+ /// <summary>
+ /// Set the owner, group, permissions, or access control list for a path.
+ /// </summary>
+ /// <param name="clientDiagnostics">The ClientDiagnostics instance used for operation reporting.</param>
+ /// <param name="pipeline">The pipeline used for sending requests.</param>
+ /// <param name="resourceUri">The URL of the service account, container, or blob that is the target of the desired operation.</param>
+ /// <param name="timeout">The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.</param>
+ /// <param name="leaseId">If specified, the operation only succeeds if the resource's lease is active and matches this ID.</param>
+ /// <param name="owner">Optional. The owner of the blob or directory.</param>
+ /// <param name="group">Optional. The owning group of the blob or directory.</param>
+ /// <param name="permissions">Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file owner, the file owning group, and others. Each class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.</param>
+ /// <param name="acl">Sets POSIX access control rights on files and directories. The value is a comma-separated list of access control entries. Each access control entry (ACE) consists of a scope, a type, a user or group identifier, and permissions in the format "[scope:][type]:[id]:[permissions]".</param>
+ /// <param name="ifMatch">Specify an ETag value to operate only on blobs with a matching value.</param>
+ /// <param name="ifNoneMatch">Specify an ETag value to operate only on blobs without a matching value.</param>
+ /// <param name="ifModifiedSince">Specify this header value to operate only on a blob if it has been modified since the specified date/time.</param>
+ /// <param name="ifUnmodifiedSince">Specify this header value to operate only on a blob if it has not been modified since the specified date/time.</param>
+ /// <param name="requestId">Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.</param>
+ /// <param name="async">Whether to invoke the operation asynchronously. The default value is true.</param>
+ /// <param name="operationName">Operation name.</param>
+ /// <param name="cancellationToken">Cancellation token.</param>
+ /// <returns>Azure.Response{Azure.Storage.Files.DataLake.Models.PathSetAccessControlResult}</returns>
+ public static async System.Threading.Tasks.ValueTask> SetAccessControlAsync(
+ Azure.Core.Pipeline.ClientDiagnostics clientDiagnostics,
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ int? timeout = default,
+ string leaseId = default,
+ string owner = default,
+ string group = default,
+ string permissions = default,
+ string acl = default,
+ Azure.ETag? ifMatch = default,
+ Azure.ETag? ifNoneMatch = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default,
+ string requestId = default,
+ bool async = true,
+ string operationName = "Azure.Storage.Files.DataLake.PathClient.SetAccessControl",
+ System.Threading.CancellationToken cancellationToken = default)
+ {
+ Azure.Core.Pipeline.DiagnosticScope _scope = clientDiagnostics.CreateScope(operationName);
+ try
+ {
+ _scope.AddAttribute("url", resourceUri);
+ _scope.Start();
+ using (Azure.Core.HttpMessage _message = SetAccessControlAsync_CreateMessage(
+ pipeline,
+ resourceUri,
+ timeout,
+ leaseId,
+ owner,
+ group,
+ permissions,
+ acl,
+ ifMatch,
+ ifNoneMatch,
+ ifModifiedSince,
+ ifUnmodifiedSince,
+ requestId))
+ {
+ if (async)
+ {
+ // Send the request asynchronously if we're being called via an async path
+ await pipeline.SendAsync(_message, cancellationToken).ConfigureAwait(false);
+ }
+ else
+ {
+ // Send the request synchronously through the API that blocks if we're being called via a sync path
+ // (this is safe because the Task will complete before the user can call Wait)
+ pipeline.Send(_message, cancellationToken);
+ }
+ Azure.Response _response = _message.Response;
+ cancellationToken.ThrowIfCancellationRequested();
+ return SetAccessControlAsync_CreateResponse(_response);
+ }
+ }
+ catch (System.Exception ex)
+ {
+ _scope.Failed(ex);
+ throw;
+ }
+ finally
+ {
+ _scope.Dispose();
+ }
+ }
+
+ ///
+ /// Create the Path.SetAccessControlAsync request.
+ ///
+ /// The pipeline used for sending requests.
+ /// The URL of the service account, container, or blob that is the targe of the desired operation.
+ /// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ /// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+ /// Optional. The owner of the blob or directory.
+ /// Optional. The owning group of the blob or directory.
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file owner, the file owning group, and others. Each class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+ /// Sets POSIX access control rights on files and directories. The value is a comma-separated list of access control entries. Each access control entry (ACE) consists of a scope, a type, a user or group identifier, and permissions in the format "[scope:][type]:[id]:[permissions]".
+ /// Specify an ETag value to operate only on blobs with a matching value.
+ /// Specify an ETag value to operate only on blobs without a matching value.
+ /// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+ /// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+ /// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ /// The Path.SetAccessControlAsync Message.
+ internal static Azure.Core.HttpMessage SetAccessControlAsync_CreateMessage(
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ int? timeout = default,
+ string leaseId = default,
+ string owner = default,
+ string group = default,
+ string permissions = default,
+ string acl = default,
+ Azure.ETag? ifMatch = default,
+ Azure.ETag? ifNoneMatch = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default,
+ string requestId = default)
+ {
+ // Validation
+ if (resourceUri == null)
+ {
+ throw new System.ArgumentNullException(nameof(resourceUri));
+ }
+
+ // Create the request
+ Azure.Core.HttpMessage _message = pipeline.CreateMessage();
+ Azure.Core.Request _request = _message.Request;
+
+ // Set the endpoint
+ _request.Method = Azure.Core.RequestMethod.Patch;
+ _request.Uri.Reset(resourceUri);
+ _request.Uri.AppendQuery("action", "setAccessControl", escapeValue: false);
+ if (timeout != null) { _request.Uri.AppendQuery("timeout", timeout.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+
+ // Add request headers
+ _request.Headers.SetValue("x-ms-version", "2019-02-02");
+ if (leaseId != null) { _request.Headers.SetValue("x-ms-lease-id", leaseId); }
+ if (owner != null) { _request.Headers.SetValue("x-ms-owner", owner); }
+ if (group != null) { _request.Headers.SetValue("x-ms-group", group); }
+ if (permissions != null) { _request.Headers.SetValue("x-ms-permissions", permissions); }
+ if (acl != null) { _request.Headers.SetValue("x-ms-acl", acl); }
+ if (ifMatch != null) { _request.Headers.SetValue("If-Match", ifMatch.Value.ToString()); }
+ if (ifNoneMatch != null) { _request.Headers.SetValue("If-None-Match", ifNoneMatch.Value.ToString()); }
+ if (ifModifiedSince != null) { _request.Headers.SetValue("If-Modified-Since", ifModifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+ if (ifUnmodifiedSince != null) { _request.Headers.SetValue("If-Unmodified-Since", ifUnmodifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+ if (requestId != null) { _request.Headers.SetValue("x-ms-client-request-id", requestId); }
+
+ return _message;
+ }
+
+ /// <summary>
+ /// Create the Path.SetAccessControlAsync response or throw a failure exception.
+ /// </summary>
+ /// <param name="response">The raw Response.</param>
+ /// <returns>The Path.SetAccessControlAsync Azure.Response{Azure.Storage.Files.DataLake.Models.PathSetAccessControlResult}.</returns>
+ // NOTE(review): generic type arguments appear stripped in this view (the return should
+ // be Azure.Response{PathSetAccessControlResult}); verify against the generated file.
+ internal static Azure.Response SetAccessControlAsync_CreateResponse(
+ Azure.Response response)
+ {
+ // Process the response
+ switch (response.Status)
+ {
+ case 200:
+ {
+ // Create the result
+ Azure.Storage.Files.DataLake.Models.PathSetAccessControlResult _value = new Azure.Storage.Files.DataLake.Models.PathSetAccessControlResult();
+
+ // Get response headers; each is copied onto the result only when present.
+ string _header;
+ if (response.Headers.TryGetValue("ETag", out _header))
+ {
+ _value.ETag = new Azure.ETag(_header);
+ }
+ if (response.Headers.TryGetValue("Last-Modified", out _header))
+ {
+ _value.LastModified = System.DateTimeOffset.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("x-ms-client-request-id", out _header))
+ {
+ _value.ClientRequestId = _header;
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ default:
+ {
+ // Any non-200 status is an error: read the body text and surface it as an exception.
+ string _value;
+ using (System.IO.StreamReader _streamReader = new System.IO.StreamReader(response.ContentStream))
+ {
+ _value = _streamReader.ReadToEnd();
+ }
+
+ throw _value.CreateException(response);
+ }
+ }
+ }
+ #endregion Path.SetAccessControlAsync
+
+ #region Path.FlushDataAsync
+ /// <summary>
+ /// Flush (write) previously uploaded data to a file.
+ /// (The original generated summary here was copied from SetAccessControl; this
+ /// method issues the ?action=flush flavor of the Path Update operation.)
+ /// </summary>
+ /// <param name="clientDiagnostics">The ClientDiagnostics instance used for operation reporting.</param>
+ /// <param name="pipeline">The pipeline used for sending requests.</param>
+ /// <param name="resourceUri">The URL of the service account, container, or blob that is the target of the desired operation.</param>
+ /// <param name="timeout">The timeout parameter is expressed in seconds.</param>
+ /// <param name="position">The byte offset to flush up to; must equal the length of the file after all data has been written. No request entity body is allowed with a flush.</param>
+ /// <param name="retainUncommittedData">If "true", uncommitted data past the flush position is retained for a future flush; otherwise it is deleted. Default is false.</param>
+ /// <param name="close">If "true" and change notifications are enabled, a final file-changed event is raised indicating the file stream has been closed. Default is false.</param>
+ /// <param name="contentLength">Required for "Append Data" and "Flush Data". Must be 0 for "Flush Data".</param>
+ /// <param name="contentHash">Specify the transactional md5 for the body, to be validated by the service.</param>
+ /// <param name="leaseId">If specified, the operation only succeeds if the resource's lease is active and matches this ID.</param>
+ /// <param name="cacheControl">Optional. Sets the blob's cache control.</param>
+ /// <param name="contentType">Optional. Sets the blob's content type.</param>
+ /// <param name="contentDisposition">Optional. Sets the blob's Content-Disposition header.</param>
+ /// <param name="contentEncoding">Optional. Sets the blob's content encoding.</param>
+ /// <param name="contentLanguage">Optional. Set the blob's content language.</param>
+ /// <param name="ifMatch">Specify an ETag value to operate only on blobs with a matching value.</param>
+ /// <param name="ifNoneMatch">Specify an ETag value to operate only on blobs without a matching value.</param>
+ /// <param name="ifModifiedSince">Specify this header value to operate only on a blob if it has been modified since the specified date/time.</param>
+ /// <param name="ifUnmodifiedSince">Specify this header value to operate only on a blob if it has not been modified since the specified date/time.</param>
+ /// <param name="requestId">Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.</param>
+ /// <param name="async">Whether to invoke the operation asynchronously. The default value is true.</param>
+ /// <param name="operationName">Operation name.</param>
+ /// <param name="cancellationToken">Cancellation token.</param>
+ /// <returns>Azure.Response{Azure.Storage.Files.DataLake.Models.PathFlushDataResult}</returns>
+ // NOTE(review): the return type's generic arguments appear stripped in this view
+ // (should be ValueTask{Response{PathFlushDataResult}}); verify against the generated file.
+ public static async System.Threading.Tasks.ValueTask> FlushDataAsync(
+ Azure.Core.Pipeline.ClientDiagnostics clientDiagnostics,
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ int? timeout = default,
+ long? position = default,
+ bool? retainUncommittedData = default,
+ bool? close = default,
+ long? contentLength = default,
+ byte[] contentHash = default,
+ string leaseId = default,
+ string cacheControl = default,
+ string contentType = default,
+ string contentDisposition = default,
+ string contentEncoding = default,
+ string contentLanguage = default,
+ Azure.ETag? ifMatch = default,
+ Azure.ETag? ifNoneMatch = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default,
+ string requestId = default,
+ bool async = true,
+ string operationName = "Azure.Storage.Files.DataLake.PathClient.FlushData",
+ System.Threading.CancellationToken cancellationToken = default)
+ {
+ // Wrap the whole send in a diagnostic scope so failures are reported per-operation.
+ Azure.Core.Pipeline.DiagnosticScope _scope = clientDiagnostics.CreateScope(operationName);
+ try
+ {
+ _scope.AddAttribute("url", resourceUri);
+ _scope.Start();
+ using (Azure.Core.HttpMessage _message = FlushDataAsync_CreateMessage(
+ pipeline,
+ resourceUri,
+ timeout,
+ position,
+ retainUncommittedData,
+ close,
+ contentLength,
+ contentHash,
+ leaseId,
+ cacheControl,
+ contentType,
+ contentDisposition,
+ contentEncoding,
+ contentLanguage,
+ ifMatch,
+ ifNoneMatch,
+ ifModifiedSince,
+ ifUnmodifiedSince,
+ requestId))
+ {
+ if (async)
+ {
+ // Send the request asynchronously if we're being called via an async path
+ await pipeline.SendAsync(_message, cancellationToken).ConfigureAwait(false);
+ }
+ else
+ {
+ // Send the request synchronously through the API that blocks if we're being called via a sync path
+ // (this is safe because the Task will complete before the user can call Wait)
+ pipeline.Send(_message, cancellationToken);
+ }
+ Azure.Response _response = _message.Response;
+ cancellationToken.ThrowIfCancellationRequested();
+ return FlushDataAsync_CreateResponse(_response);
+ }
+ }
+ catch (System.Exception ex)
+ {
+ _scope.Failed(ex);
+ throw;
+ }
+ finally
+ {
+ _scope.Dispose();
+ }
+ }
+
+ /// <summary>
+ /// Create the Path.FlushDataAsync request.
+ /// </summary>
+ /// <param name="pipeline">The pipeline used for sending requests.</param>
+ /// <param name="resourceUri">The URL of the service account, container, or blob that is the target of the desired operation.</param>
+ /// <param name="timeout">The timeout parameter is expressed in seconds.</param>
+ /// <param name="position">The byte offset to flush up to; must equal the length of the file after all data has been written. No request entity body is allowed with a flush.</param>
+ /// <param name="retainUncommittedData">If "true", uncommitted data past the flush position is retained for a future flush; otherwise it is deleted. Default is false.</param>
+ /// <param name="close">If "true" and change notifications are enabled, a final file-changed event is raised indicating the file stream has been closed. Default is false.</param>
+ /// <param name="contentLength">Required for "Append Data" and "Flush Data". Must be 0 for "Flush Data".</param>
+ /// <param name="contentHash">Specify the transactional md5 for the body, to be validated by the service.</param>
+ /// <param name="leaseId">If specified, the operation only succeeds if the resource's lease is active and matches this ID.</param>
+ /// <param name="cacheControl">Optional. Sets the blob's cache control.</param>
+ /// <param name="contentType">Optional. Sets the blob's content type.</param>
+ /// <param name="contentDisposition">Optional. Sets the blob's Content-Disposition header.</param>
+ /// <param name="contentEncoding">Optional. Sets the blob's content encoding.</param>
+ /// <param name="contentLanguage">Optional. Set the blob's content language.</param>
+ /// <param name="ifMatch">Specify an ETag value to operate only on blobs with a matching value.</param>
+ /// <param name="ifNoneMatch">Specify an ETag value to operate only on blobs without a matching value.</param>
+ /// <param name="ifModifiedSince">Specify this header value to operate only on a blob if it has been modified since the specified date/time.</param>
+ /// <param name="ifUnmodifiedSince">Specify this header value to operate only on a blob if it has not been modified since the specified date/time.</param>
+ /// <param name="requestId">Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.</param>
+ /// <returns>The Path.FlushDataAsync Message.</returns>
+ internal static Azure.Core.HttpMessage FlushDataAsync_CreateMessage(
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ int? timeout = default,
+ long? position = default,
+ bool? retainUncommittedData = default,
+ bool? close = default,
+ long? contentLength = default,
+ byte[] contentHash = default,
+ string leaseId = default,
+ string cacheControl = default,
+ string contentType = default,
+ string contentDisposition = default,
+ string contentEncoding = default,
+ string contentLanguage = default,
+ Azure.ETag? ifMatch = default,
+ Azure.ETag? ifNoneMatch = default,
+ System.DateTimeOffset? ifModifiedSince = default,
+ System.DateTimeOffset? ifUnmodifiedSince = default,
+ string requestId = default)
+ {
+ // Validation
+ if (resourceUri == null)
+ {
+ throw new System.ArgumentNullException(nameof(resourceUri));
+ }
+
+ // Create the request
+ Azure.Core.HttpMessage _message = pipeline.CreateMessage();
+ Azure.Core.Request _request = _message.Request;
+
+ // Set the endpoint: PATCH <resourceUri>?action=flush selects the flush flavor
+ // of the DFS Path Update operation.
+ _request.Method = Azure.Core.RequestMethod.Patch;
+ _request.Uri.Reset(resourceUri);
+ _request.Uri.AppendQuery("action", "flush", escapeValue: false);
+ if (timeout != null) { _request.Uri.AppendQuery("timeout", timeout.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+ if (position != null) { _request.Uri.AppendQuery("position", position.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+ // Booleans must be serialized lowercase ("true"/"false") for the query string,
+ // hence the CA1308 suppressions around ToLowerInvariant.
+ if (retainUncommittedData != null) {
+ #pragma warning disable CA1308 // Normalize strings to uppercase
+ _request.Uri.AppendQuery("retainUncommittedData", retainUncommittedData.Value.ToString(System.Globalization.CultureInfo.InvariantCulture).ToLowerInvariant());
+ #pragma warning restore CA1308 // Normalize strings to uppercase
+ }
+ if (close != null) {
+ #pragma warning disable CA1308 // Normalize strings to uppercase
+ _request.Uri.AppendQuery("close", close.Value.ToString(System.Globalization.CultureInfo.InvariantCulture).ToLowerInvariant());
+ #pragma warning restore CA1308 // Normalize strings to uppercase
+ }
+
+ // Add request headers; optional values are only sent when the caller provided them.
+ _request.Headers.SetValue("x-ms-version", "2019-02-02");
+ if (contentLength != null) { _request.Headers.SetValue("Content-Length", contentLength.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+ if (contentHash != null) { _request.Headers.SetValue("x-ms-content-md5", System.Convert.ToBase64String(contentHash)); }
+ if (leaseId != null) { _request.Headers.SetValue("x-ms-lease-id", leaseId); }
+ if (cacheControl != null) { _request.Headers.SetValue("x-ms-cache-control", cacheControl); }
+ if (contentType != null) { _request.Headers.SetValue("x-ms-content-type", contentType); }
+ if (contentDisposition != null) { _request.Headers.SetValue("x-ms-content-disposition", contentDisposition); }
+ if (contentEncoding != null) { _request.Headers.SetValue("x-ms-content-encoding", contentEncoding); }
+ if (contentLanguage != null) { _request.Headers.SetValue("x-ms-content-language", contentLanguage); }
+ if (ifMatch != null) { _request.Headers.SetValue("If-Match", ifMatch.Value.ToString()); }
+ if (ifNoneMatch != null) { _request.Headers.SetValue("If-None-Match", ifNoneMatch.Value.ToString()); }
+ // Conditional date headers use the RFC 1123 ("R") format required by HTTP.
+ if (ifModifiedSince != null) { _request.Headers.SetValue("If-Modified-Since", ifModifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+ if (ifUnmodifiedSince != null) { _request.Headers.SetValue("If-Unmodified-Since", ifUnmodifiedSince.Value.ToString("R", System.Globalization.CultureInfo.InvariantCulture)); }
+ if (requestId != null) { _request.Headers.SetValue("x-ms-client-request-id", requestId); }
+
+ return _message;
+ }
+
+ /// <summary>
+ /// Create the Path.FlushDataAsync response or throw a failure exception.
+ /// </summary>
+ /// <param name="response">The raw Response.</param>
+ /// <returns>The Path.FlushDataAsync Azure.Response{Azure.Storage.Files.DataLake.Models.PathFlushDataResult}.</returns>
+ // NOTE(review): generic type arguments appear stripped in this view (the return should
+ // be Azure.Response{PathFlushDataResult}); verify against the generated file.
+ internal static Azure.Response FlushDataAsync_CreateResponse(
+ Azure.Response response)
+ {
+ // Process the response
+ switch (response.Status)
+ {
+ case 200:
+ {
+ // Create the result
+ Azure.Storage.Files.DataLake.Models.PathFlushDataResult _value = new Azure.Storage.Files.DataLake.Models.PathFlushDataResult();
+
+ // Get response headers; each is copied onto the result only when present.
+ string _header;
+ if (response.Headers.TryGetValue("ETag", out _header))
+ {
+ _value.ETag = new Azure.ETag(_header);
+ }
+ if (response.Headers.TryGetValue("Last-Modified", out _header))
+ {
+ _value.LastModified = System.DateTimeOffset.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("Content-Length", out _header))
+ {
+ _value.ContentLength = long.Parse(_header, System.Globalization.CultureInfo.InvariantCulture);
+ }
+ if (response.Headers.TryGetValue("x-ms-client-request-id", out _header))
+ {
+ _value.ClientRequestId = _header;
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ default:
+ {
+ // Any non-200 status is an error: read the body text and surface it as an exception.
+ string _value;
+ using (System.IO.StreamReader _streamReader = new System.IO.StreamReader(response.ContentStream))
+ {
+ _value = _streamReader.ReadToEnd();
+ }
+
+ throw _value.CreateException(response);
+ }
+ }
+ }
+ #endregion Path.FlushDataAsync
+
+ #region Path.AppendDataAsync
+ /// <summary>
+ /// Append data to the file.
+ /// </summary>
+ /// <param name="clientDiagnostics">The ClientDiagnostics instance used for operation reporting.</param>
+ /// <param name="pipeline">The pipeline used for sending requests.</param>
+ /// <param name="resourceUri">The URL of the service account, container, or blob that is the target of the desired operation.</param>
+ /// <param name="body">Initial data</param>
+ /// <param name="position">The byte offset at which the data is to be appended. Appended data is not committed until flushed.</param>
+ /// <param name="timeout">The timeout parameter is expressed in seconds.</param>
+ /// <param name="contentLength">Required for "Append Data" and "Flush Data". Must be the length of the request content in bytes for "Append Data".</param>
+ /// <param name="transactionalContentHash">Specify the transactional md5 for the body, to be validated by the service.</param>
+ /// <param name="leaseId">If specified, the operation only succeeds if the resource's lease is active and matches this ID.</param>
+ /// <param name="requestId">Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.</param>
+ /// <param name="async">Whether to invoke the operation asynchronously. The default value is true.</param>
+ /// <param name="operationName">Operation name.</param>
+ /// <param name="cancellationToken">Cancellation token.</param>
+ /// <returns>Azure.Response{Azure.Storage.Files.DataLake.Models.PathAppendDataResult}</returns>
+ // NOTE(review): the return type's generic arguments appear stripped in this view
+ // (should be ValueTask{Response{PathAppendDataResult}}); verify against the generated file.
+ public static async System.Threading.Tasks.ValueTask> AppendDataAsync(
+ Azure.Core.Pipeline.ClientDiagnostics clientDiagnostics,
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ System.IO.Stream body,
+ long? position = default,
+ int? timeout = default,
+ long? contentLength = default,
+ byte[] transactionalContentHash = default,
+ string leaseId = default,
+ string requestId = default,
+ bool async = true,
+ string operationName = "Azure.Storage.Files.DataLake.PathClient.AppendData",
+ System.Threading.CancellationToken cancellationToken = default)
+ {
+ // Wrap the whole send in a diagnostic scope so failures are reported per-operation.
+ Azure.Core.Pipeline.DiagnosticScope _scope = clientDiagnostics.CreateScope(operationName);
+ try
+ {
+ _scope.AddAttribute("url", resourceUri);
+ _scope.Start();
+ using (Azure.Core.HttpMessage _message = AppendDataAsync_CreateMessage(
+ pipeline,
+ resourceUri,
+ body,
+ position,
+ timeout,
+ contentLength,
+ transactionalContentHash,
+ leaseId,
+ requestId))
+ {
+ if (async)
+ {
+ // Send the request asynchronously if we're being called via an async path
+ await pipeline.SendAsync(_message, cancellationToken).ConfigureAwait(false);
+ }
+ else
+ {
+ // Send the request synchronously through the API that blocks if we're being called via a sync path
+ // (this is safe because the Task will complete before the user can call Wait)
+ pipeline.Send(_message, cancellationToken);
+ }
+ Azure.Response _response = _message.Response;
+ cancellationToken.ThrowIfCancellationRequested();
+ return AppendDataAsync_CreateResponse(_response);
+ }
+ }
+ catch (System.Exception ex)
+ {
+ _scope.Failed(ex);
+ throw;
+ }
+ finally
+ {
+ _scope.Dispose();
+ }
+ }
+
+ /// <summary>
+ /// Create the Path.AppendDataAsync request.
+ /// </summary>
+ /// <param name="pipeline">The pipeline used for sending requests.</param>
+ /// <param name="resourceUri">The URL of the service account, container, or blob that is the target of the desired operation.</param>
+ /// <param name="body">Initial data</param>
+ /// <param name="position">The byte offset at which the data is to be appended. Appended data is not committed until flushed.</param>
+ /// <param name="timeout">The timeout parameter is expressed in seconds.</param>
+ /// <param name="contentLength">Required for "Append Data" and "Flush Data". Must be the length of the request content in bytes for "Append Data".</param>
+ /// <param name="transactionalContentHash">Specify the transactional md5 for the body, to be validated by the service.</param>
+ /// <param name="leaseId">If specified, the operation only succeeds if the resource's lease is active and matches this ID.</param>
+ /// <param name="requestId">Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.</param>
+ /// <returns>The Path.AppendDataAsync Message.</returns>
+ internal static Azure.Core.HttpMessage AppendDataAsync_CreateMessage(
+ Azure.Core.Pipeline.HttpPipeline pipeline,
+ System.Uri resourceUri,
+ System.IO.Stream body,
+ long? position = default,
+ int? timeout = default,
+ long? contentLength = default,
+ byte[] transactionalContentHash = default,
+ string leaseId = default,
+ string requestId = default)
+ {
+ // Validation
+ if (resourceUri == null)
+ {
+ throw new System.ArgumentNullException(nameof(resourceUri));
+ }
+ if (body == null)
+ {
+ throw new System.ArgumentNullException(nameof(body));
+ }
+
+ // Create the request
+ Azure.Core.HttpMessage _message = pipeline.CreateMessage();
+ Azure.Core.Request _request = _message.Request;
+
+ // Set the endpoint: PATCH <resourceUri>?action=append selects the append flavor
+ // of the DFS Path Update operation.
+ _request.Method = Azure.Core.RequestMethod.Patch;
+ _request.Uri.Reset(resourceUri);
+ _request.Uri.AppendQuery("action", "append", escapeValue: false);
+ if (position != null) { _request.Uri.AppendQuery("position", position.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+ if (timeout != null) { _request.Uri.AppendQuery("timeout", timeout.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+
+ // Add request headers; optional values are only sent when the caller provided them.
+ _request.Headers.SetValue("x-ms-version", "2019-02-02");
+ if (contentLength != null) { _request.Headers.SetValue("Content-Length", contentLength.Value.ToString(System.Globalization.CultureInfo.InvariantCulture)); }
+ if (transactionalContentHash != null) { _request.Headers.SetValue("Content-MD5", System.Convert.ToBase64String(transactionalContentHash)); }
+ if (leaseId != null) { _request.Headers.SetValue("x-ms-lease-id", leaseId); }
+ if (requestId != null) { _request.Headers.SetValue("x-ms-client-request-id", requestId); }
+
+ // Create the body; unlike flush, append carries the data to upload.
+ _request.Content = Azure.Core.RequestContent.Create(body);
+
+ return _message;
+ }
+
+ /// <summary>
+ /// Create the Path.AppendDataAsync response or throw a failure exception.
+ /// </summary>
+ /// <param name="response">The raw Response.</param>
+ /// <returns>The Path.AppendDataAsync Azure.Response{Azure.Storage.Files.DataLake.Models.PathAppendDataResult}.</returns>
+ // NOTE(review): generic type arguments appear stripped in this view (the return should
+ // be Azure.Response{PathAppendDataResult}); verify against the generated file.
+ internal static Azure.Response AppendDataAsync_CreateResponse(
+ Azure.Response response)
+ {
+ // Process the response; append returns 202 Accepted (data is staged, not committed).
+ switch (response.Status)
+ {
+ case 202:
+ {
+ // Create the result
+ Azure.Storage.Files.DataLake.Models.PathAppendDataResult _value = new Azure.Storage.Files.DataLake.Models.PathAppendDataResult();
+
+ // Get response headers; copied onto the result only when present.
+ string _header;
+ if (response.Headers.TryGetValue("x-ms-client-request-id", out _header))
+ {
+ _value.ClientRequestId = _header;
+ }
+
+ // Create the response
+ return Response.FromValue(_value, response);
+ }
+ default:
+ {
+ // Any non-202 status is an error: read the body text and surface it as an exception.
+ string _value;
+ using (System.IO.StreamReader _streamReader = new System.IO.StreamReader(response.ContentStream))
+ {
+ _value = _streamReader.ReadToEnd();
+ }
+
+ throw _value.CreateException(response);
+ }
+ }
+ }
+ #endregion Path.AppendDataAsync
+ }
+ #endregion Path operations
+ }
+}
+#endregion Service
+
+#region Models
+#region class FileSystem
+namespace Azure.Storage.Files.DataLake.Models
+{
+ /// <summary>
+ /// A single filesystem entry as returned by the service when listing filesystems.
+ /// All values are kept as raw strings exactly as received.
+ /// </summary>
+ internal partial class FileSystem
+ {
+ /// <summary>
+ /// The filesystem name.
+ /// </summary>
+ public string Name { get; internal set; }
+
+ /// <summary>
+ /// The last-modified value, kept as the raw string from the service.
+ /// </summary>
+ public string LastModified { get; internal set; }
+
+ /// <summary>
+ /// The entity tag, kept as the raw string from the service.
+ /// </summary>
+ public string ETag { get; internal set; }
+
+ /// <summary>
+ /// Prevent direct instantiation of FileSystem instances.
+ /// You can use DataLakeModelFactory.FileSystem instead.
+ /// </summary>
+ internal FileSystem() { }
+
+ /// <summary>
+ /// Deserializes XML into a new FileSystem instance.
+ /// Note the element names are lowercase ("name", "lastModified", "eTag") and
+ /// unnamespaced; missing elements leave the corresponding property null.
+ /// </summary>
+ /// <param name="element">The XML element to deserialize.</param>
+ /// <returns>A deserialized FileSystem instance.</returns>
+ internal static Azure.Storage.Files.DataLake.Models.FileSystem FromXml(System.Xml.Linq.XElement element)
+ {
+ System.Diagnostics.Debug.Assert(element != null);
+ System.Xml.Linq.XElement _child;
+ Azure.Storage.Files.DataLake.Models.FileSystem _value = new Azure.Storage.Files.DataLake.Models.FileSystem();
+ _child = element.Element(System.Xml.Linq.XName.Get("name", ""));
+ if (_child != null)
+ {
+ _value.Name = _child.Value;
+ }
+ _child = element.Element(System.Xml.Linq.XName.Get("lastModified", ""));
+ if (_child != null)
+ {
+ _value.LastModified = _child.Value;
+ }
+ _child = element.Element(System.Xml.Linq.XName.Get("eTag", ""));
+ if (_child != null)
+ {
+ _value.ETag = _child.Value;
+ }
+ // Hook for hand-written partial-class customization of deserialization.
+ CustomizeFromXml(element, _value);
+ return _value;
+ }
+
+ static partial void CustomizeFromXml(System.Xml.Linq.XElement element, Azure.Storage.Files.DataLake.Models.FileSystem value);
+ }
+}
+#endregion class FileSystem
+
+#region class FileSystemCreateResult
+namespace Azure.Storage.Files.DataLake.Models
+{
+ /// <summary>
+ /// The result of a FileSystem Create operation.
+ /// </summary>
+ internal partial class FileSystemCreateResult
+ {
+ /// <summary>
+ /// An HTTP entity tag associated with the FileSystem.
+ /// </summary>
+ public Azure.ETag ETag { get; internal set; }
+
+ /// <summary>
+ /// The date and time the filesystem was last modified. Operations on files and directories do not affect the last modified time.
+ /// </summary>
+ public System.DateTimeOffset LastModified { get; internal set; }
+
+ /// <summary>
+ /// A bool string that indicates whether the namespace feature is enabled. If "true", the namespace is enabled for the filesystem.
+ /// </summary>
+ public string NamespaceEnabled { get; internal set; }
+
+ /// <summary>
+ /// Prevent direct instantiation of FileSystemCreateResult instances.
+ /// You can use DataLakeModelFactory.FileSystemCreateResult instead.
+ /// </summary>
+ internal FileSystemCreateResult() { }
+ }
+}
+#endregion class FileSystemCreateResult
+
+#region class FileSystemGetPropertiesResult
+namespace Azure.Storage.Files.DataLake.Models
+{
+ /// <summary>
+ /// The result of a FileSystem GetProperties operation.
+ /// </summary>
+ internal partial class FileSystemGetPropertiesResult
+ {
+ /// <summary>
+ /// An HTTP entity tag associated with the filesystem. Changes to filesystem properties affect the entity tag, but operations on files and directories do not.
+ /// </summary>
+ public Azure.ETag ETag { get; internal set; }
+
+ /// <summary>
+ /// The date and time the filesystem was last modified. Changes to filesystem properties update the last modified time, but operations on files and directories do not.
+ /// </summary>
+ public System.DateTimeOffset LastModified { get; internal set; }
+
+ /// <summary>
+ /// The user-defined properties associated with the filesystem. A comma-separated list of name and value pairs in the format "n1=v1, n2=v2, ...", where each value is a base64 encoded string. Note that the string may only contain ASCII characters in the ISO-8859-1 character set.
+ /// </summary>
+ public string Properties { get; internal set; }
+
+ /// <summary>
+ /// A bool string that indicates whether the namespace feature is enabled. If "true", the namespace is enabled for the filesystem.
+ /// </summary>
+ public string NamespaceEnabled { get; internal set; }
+
+ /// <summary>
+ /// Prevent direct instantiation of FileSystemGetPropertiesResult instances.
+ /// You can use DataLakeModelFactory.FileSystemGetPropertiesResult instead.
+ /// </summary>
+ internal FileSystemGetPropertiesResult() { }
+ }
+}
+#endregion class FileSystemGetPropertiesResult
+
+#region class FileSystemListPathsResult
+namespace Azure.Storage.Files.DataLake.Models
+{
+ /// <summary>
+ /// The result of a FileSystem ListPaths operation.
+ /// </summary>
+ internal partial class FileSystemListPathsResult
+ {
+ /// <summary>
+ /// An HTTP entity tag associated with the filesystem. Changes to filesystem properties affect the entity tag, but operations on files and directories do not.
+ /// </summary>
+ public Azure.ETag ETag { get; internal set; }
+
+ /// <summary>
+ /// The date and time the filesystem was last modified. Changes to filesystem properties update the last modified time, but operations on files and directories do not.
+ /// </summary>
+ public System.DateTimeOffset LastModified { get; internal set; }
+
+ /// <summary>
+ /// If the number of paths to be listed exceeds the maxResults limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the list operation to continue listing the paths.
+ /// </summary>
+ public string Continuation { get; internal set; }
+
+ /// <summary>
+ /// The raw response body stream (the JSON path listing is parsed by the caller).
+ /// </summary>
+ public System.IO.Stream Body { get; internal set; }
+
+ /// <summary>
+ /// Prevent direct instantiation of FileSystemListPathsResult instances.
+ /// You can use DataLakeModelFactory.FileSystemListPathsResult instead.
+ /// </summary>
+ internal FileSystemListPathsResult() { }
+ }
+}
+#endregion class FileSystemListPathsResult
+
+#region class FileSystemSetPropertiesResult
+namespace Azure.Storage.Files.DataLake.Models
+{
+ /// <summary>
+ /// The result of a FileSystem SetProperties operation.
+ /// </summary>
+ internal partial class FileSystemSetPropertiesResult
+ {
+ /// <summary>
+ /// An HTTP entity tag associated with the filesystem. Changes to filesystem properties affect the entity tag, but operations on files and directories do not.
+ /// </summary>
+ public Azure.ETag ETag { get; internal set; }
+
+ /// <summary>
+ /// The date and time the filesystem was last modified. Changes to filesystem properties update the last modified time, but operations on files and directories do not.
+ /// </summary>
+ public System.DateTimeOffset LastModified { get; internal set; }
+
+ /// <summary>
+ /// Prevent direct instantiation of FileSystemSetPropertiesResult instances.
+ /// You can use DataLakeModelFactory.FileSystemSetPropertiesResult instead.
+ /// </summary>
+ internal FileSystemSetPropertiesResult() { }
+ }
+}
+#endregion class FileSystemSetPropertiesResult
+
+#region class FileSystemList
+namespace Azure.Storage.Files.DataLake.Models
+{
+ /// <summary>
+ /// The collection of filesystems returned when listing filesystems.
+ /// </summary>
+ internal partial class FileSystemList
+ {
+ /// <summary>
+ /// The filesystems in the list.
+ /// NOTE(review): the generic type argument appears stripped in this view (presumably
+ /// IEnumerable{FileSystem}); verify against the generated file.
+ /// </summary>
+ public System.Collections.Generic.IEnumerable Filesystems { get; internal set; }
+
+ /// <summary>
+ /// Creates a new FileSystemList instance
+ /// </summary>
+ public FileSystemList()
+ : this(false)
+ {
+ }
+
+ /// <summary>
+ /// Creates a new FileSystemList instance
+ /// </summary>
+ /// <param name="skipInitialization">Whether to skip initializing nested objects (used by FromXml, which assigns them itself).</param>
+ internal FileSystemList(bool skipInitialization)
+ {
+ if (!skipInitialization)
+ {
+ Filesystems = new System.Collections.Generic.List();
+ }
+ }
+
+ /// <summary>
+ /// Deserializes XML into a new FileSystemList instance.
+ /// Reads every unnamespaced "FileSystem" child element.
+ /// </summary>
+ /// <param name="element">The XML element to deserialize.</param>
+ /// <returns>A deserialized FileSystemList instance.</returns>
+ internal static Azure.Storage.Files.DataLake.Models.FileSystemList FromXml(System.Xml.Linq.XElement element)
+ {
+ System.Diagnostics.Debug.Assert(element != null);
+ Azure.Storage.Files.DataLake.Models.FileSystemList _value = new Azure.Storage.Files.DataLake.Models.FileSystemList(true);
+ _value.Filesystems = System.Linq.Enumerable.ToList(
+ System.Linq.Enumerable.Select(
+ element.Elements(System.Xml.Linq.XName.Get("FileSystem", "")),
+ e => Azure.Storage.Files.DataLake.Models.FileSystem.FromXml(e)));
+ // Hook for hand-written partial-class customization of deserialization.
+ CustomizeFromXml(element, _value);
+ return _value;
+ }
+
+ static partial void CustomizeFromXml(System.Xml.Linq.XElement element, Azure.Storage.Files.DataLake.Models.FileSystemList value);
+ }
+}
+#endregion class FileSystemList
+
+#region class Path
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Path
+ ///
+ internal partial class Path
+ {
+ ///
+ /// name
+ ///
+ public string Name { get; internal set; }
+
+ ///
+ /// isDirectory
+ ///
+ public bool? IsDirectory { get; internal set; }
+
+ ///
+ /// lastModified
+ ///
+ public string LastModified { get; internal set; }
+
+ ///
+ /// eTag
+ ///
+ public string ETag { get; internal set; }
+
+ ///
+ /// contentLength
+ ///
+ public long? ContentLength { get; internal set; }
+
+ ///
+ /// owner
+ ///
+ public string Owner { get; internal set; }
+
+ ///
+ /// group
+ ///
+ public string Group { get; internal set; }
+
+ ///
+ /// permissions
+ ///
+ public string Permissions { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of Path instances.
+ /// You can use DataLakeModelFactory.Path instead.
+ ///
+ internal Path() { }
+ }
+}
+#endregion class Path
+
+#region class PathAppendDataResult
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Path AppendDataResult
+ ///
+ internal partial class PathAppendDataResult
+ {
+ ///
+ /// If a client request id header is sent in the request, this header will be present in the response with the same value.
+ ///
+ public string ClientRequestId { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of PathAppendDataResult instances.
+ /// You can use DataLakeModelFactory.PathAppendDataResult instead.
+ ///
+ internal PathAppendDataResult() { }
+ }
+}
+#endregion class PathAppendDataResult
+
+#region class PathCreateResult
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Path CreateResult
+ ///
+ internal partial class PathCreateResult
+ {
+ ///
+ /// An HTTP entity tag associated with the file or directory.
+ ///
+ public Azure.ETag ETag { get; internal set; }
+
+ ///
+ /// The date and time the file or directory was last modified. Write operations on the file or directory update the last modified time.
+ ///
+ public System.DateTimeOffset LastModified { get; internal set; }
+
+ ///
+ /// When renaming a directory, the number of paths that are renamed with each invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the rename operation to continue renaming the directory.
+ ///
+ public string Continuation { get; internal set; }
+
+ ///
+ /// The size of the resource in bytes.
+ ///
+ public long ContentLength { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of PathCreateResult instances.
+ /// You can use DataLakeModelFactory.PathCreateResult instead.
+ ///
+ internal PathCreateResult() { }
+ }
+}
+#endregion class PathCreateResult
+
+#region class PathDeleteResult
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Path DeleteResult
+ ///
+ internal partial class PathDeleteResult
+ {
+ ///
+ /// When deleting a directory, the number of paths that are deleted with each invocation is limited. If the number of paths to be deleted exceeds this limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the delete operation to continue deleting the directory.
+ ///
+ public string Continuation { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of PathDeleteResult instances.
+ /// You can use DataLakeModelFactory.PathDeleteResult instead.
+ ///
+ internal PathDeleteResult() { }
+ }
+}
+#endregion class PathDeleteResult
+
+#region class PathFlushDataResult
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Path FlushDataResult
+ ///
+ internal partial class PathFlushDataResult
+ {
+ ///
+ /// An HTTP entity tag associated with the file or directory.
+ ///
+ public Azure.ETag ETag { get; internal set; }
+
+ ///
+ /// The date and time the file or directory was last modified. Write operations on the file or directory update the last modified time.
+ ///
+ public System.DateTimeOffset LastModified { get; internal set; }
+
+ ///
+ /// The size of the resource in bytes.
+ ///
+ public long ContentLength { get; internal set; }
+
+ ///
+ /// If a client request id header is sent in the request, this header will be present in the response with the same value.
+ ///
+ public string ClientRequestId { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of PathFlushDataResult instances.
+ /// You can use DataLakeModelFactory.PathFlushDataResult instead.
+ ///
+ internal PathFlushDataResult() { }
+ }
+}
+#endregion class PathFlushDataResult
+
+#region class PathGetPropertiesResult
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Path GetPropertiesResult
+ ///
+ internal partial class PathGetPropertiesResult
+ {
+ ///
+ /// Indicates that the service supports requests for partial file content.
+ ///
+ public string AcceptRanges { get; internal set; }
+
+ ///
+ /// If the Cache-Control request header has previously been set for the resource, that value is returned in this header.
+ ///
+ public string CacheControl { get; internal set; }
+
+ ///
+ /// If the Content-Disposition request header has previously been set for the resource, that value is returned in this header.
+ ///
+ public string ContentDisposition { get; internal set; }
+
+ ///
+ /// If the Content-Encoding request header has previously been set for the resource, that value is returned in this header.
+ ///
+ public string ContentEncoding { get; internal set; }
+
+ ///
+ /// If the Content-Language request header has previously been set for the resource, that value is returned in this header.
+ ///
+ public string ContentLanguage { get; internal set; }
+
+ ///
+ /// The size of the resource in bytes.
+ ///
+ public long ContentLength { get; internal set; }
+
+ ///
+ /// Indicates the range of bytes returned in the event that the client requested a subset of the file by setting the Range request header.
+ ///
+ public string ContentRange { get; internal set; }
+
+ ///
+ /// The content type specified for the resource. If no content type was specified, the default content type is application/octet-stream.
+ ///
+ public string ContentType { get; internal set; }
+
+ ///
+ /// The MD5 hash of complete file stored in storage. This header is returned only for "GetProperties" operation. If the Content-MD5 header has been set for the file, this response header is returned for GetProperties call so that the client can check for message content integrity.
+ ///
+ public string ContentMD5 { get; internal set; }
+
+ ///
+ /// An HTTP entity tag associated with the file or directory.
+ ///
+ public Azure.ETag ETag { get; internal set; }
+
+ ///
+ /// The date and time the file or directory was last modified. Write operations on the file or directory update the last modified time.
+ ///
+ public System.DateTimeOffset LastModified { get; internal set; }
+
+ ///
+ /// The type of the resource. The value may be "file" or "directory". If not set, the value is "file".
+ ///
+ public string ResourceType { get; internal set; }
+
+ ///
+ /// The user-defined properties associated with the file or directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded string. Note that the string may only contain ASCII characters in the ISO-8859-1 character set.
+ ///
+ public string Properties { get; internal set; }
+
+ ///
+ /// The owner of the file or directory. Included in the response if Hierarchical Namespace is enabled for the account.
+ ///
+ public string Owner { get; internal set; }
+
+ ///
+ /// The owning group of the file or directory. Included in the response if Hierarchical Namespace is enabled for the account.
+ ///
+ public string Group { get; internal set; }
+
+ ///
+ /// The POSIX access permissions for the file owner, the file owning group, and others. Included in the response if Hierarchical Namespace is enabled for the account.
+ ///
+ public string Permissions { get; internal set; }
+
+ ///
+ /// The POSIX access control list for the file or directory. Included in the response only if the action is "getAccessControl" and Hierarchical Namespace is enabled for the account.
+ ///
+ public string ACL { get; internal set; }
+
+ ///
+ /// When a resource is leased, specifies whether the lease is of infinite or fixed duration.
+ ///
+ public string LeaseDuration { get; internal set; }
+
+ ///
+ /// Lease state of the resource.
+ ///
+ public string LeaseState { get; internal set; }
+
+ ///
+ /// The lease status of the resource.
+ ///
+ public string LeaseStatus { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of PathGetPropertiesResult instances.
+ /// You can use DataLakeModelFactory.PathGetPropertiesResult instead.
+ ///
+ internal PathGetPropertiesResult() { }
+ }
+}
+#endregion class PathGetPropertiesResult
+
+#region class PathLeaseResult
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Path LeaseResult
+ ///
+ internal partial class PathLeaseResult
+ {
+ ///
+ /// An HTTP entity tag associated with the file.
+ ///
+ public Azure.ETag ETag { get; internal set; }
+
+ ///
+ /// The date and time the file was last modified. Write operations on the file update the last modified time.
+ ///
+ public System.DateTimeOffset LastModified { get; internal set; }
+
+ ///
+ /// The time remaining in the lease period in seconds.
+ ///
+ public string LeaseTime { get; internal set; }
+
+ ///
+ /// A successful "renew" action returns the lease ID.
+ ///
+ public string LeaseId { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of PathLeaseResult instances.
+ /// You can use DataLakeModelFactory.PathLeaseResult instead.
+ ///
+ internal PathLeaseResult() { }
+ }
+}
+#endregion class PathLeaseResult
+
+#region class PathReadResult
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Path ReadResult
+ ///
+ internal partial class PathReadResult
+ {
+ ///
+ /// Indicates that the service supports requests for partial file content.
+ ///
+ public string AcceptRanges { get; internal set; }
+
+ ///
+ /// If the Cache-Control request header has previously been set for the resource, that value is returned in this header.
+ ///
+ public string CacheControl { get; internal set; }
+
+ ///
+ /// If the Content-Disposition request header has previously been set for the resource, that value is returned in this header.
+ ///
+ public string ContentDisposition { get; internal set; }
+
+ ///
+ /// If the Content-Encoding request header has previously been set for the resource, that value is returned in this header.
+ ///
+ public string ContentEncoding { get; internal set; }
+
+ ///
+ /// If the Content-Language request header has previously been set for the resource, that value is returned in this header.
+ ///
+ public string ContentLanguage { get; internal set; }
+
+ ///
+ /// The size of the resource in bytes.
+ ///
+ public long ContentLength { get; internal set; }
+
+ ///
+ /// Indicates the range of bytes returned in the event that the client requested a subset of the file by setting the Range request header.
+ ///
+ public string ContentRange { get; internal set; }
+
+ ///
+ /// The content type specified for the resource. If no content type was specified, the default content type is application/octet-stream.
+ ///
+ public string ContentType { get; internal set; }
+
+ ///
+ /// The MD5 hash of complete file. If the file has an MD5 hash and this read operation is to read the complete file, this response header is returned so that the client can check for message content integrity.
+ ///
+ public string ContentMD5 { get; internal set; }
+
+ ///
+ /// The MD5 hash of the complete file stored in storage. If the file has an MD5 hash, and if request contains range header (Range or x-ms-range), this response header is returned with the value of the complete file's MD5 value. This value may or may not be equal to the value returned in Content-MD5 header, with the latter calculated from the requested range.
+ ///
+ public string XMSContentMd5 { get; internal set; }
+
+ ///
+ /// An HTTP entity tag associated with the file or directory.
+ ///
+ public Azure.ETag ETag { get; internal set; }
+
+ ///
+ /// The date and time the file or directory was last modified. Write operations on the file or directory update the last modified time.
+ ///
+ public System.DateTimeOffset LastModified { get; internal set; }
+
+ ///
+ /// The type of the resource. The value may be "file" or "directory". If not set, the value is "file".
+ ///
+ public string ResourceType { get; internal set; }
+
+ ///
+ /// The user-defined properties associated with the file or directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded string. Note that the string may only contain ASCII characters in the ISO-8859-1 character set.
+ ///
+ public string Properties { get; internal set; }
+
+ ///
+ /// When a resource is leased, specifies whether the lease is of infinite or fixed duration.
+ ///
+ public string LeaseDuration { get; internal set; }
+
+ ///
+ /// Lease state of the resource.
+ ///
+ public string LeaseState { get; internal set; }
+
+ ///
+ /// The lease status of the resource.
+ ///
+ public string LeaseStatus { get; internal set; }
+
+ ///
+ /// Body
+ ///
+ public System.IO.Stream Body { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of PathReadResult instances.
+ /// You can use DataLakeModelFactory.PathReadResult instead.
+ ///
+ internal PathReadResult() { }
+ }
+}
+#endregion class PathReadResult
+
+#region class PathSetAccessControlResult
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Path SetAccessControlResult
+ ///
+ internal partial class PathSetAccessControlResult
+ {
+ ///
+ /// An HTTP entity tag associated with the file or directory.
+ ///
+ public Azure.ETag ETag { get; internal set; }
+
+ ///
+ /// The date and time the file or directory was last modified. Write operations on the file or directory update the last modified time.
+ ///
+ public System.DateTimeOffset LastModified { get; internal set; }
+
+ ///
+ /// If a client request id header is sent in the request, this header will be present in the response with the same value.
+ ///
+ public string ClientRequestId { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of PathSetAccessControlResult instances.
+ /// You can use DataLakeModelFactory.PathSetAccessControlResult instead.
+ ///
+ internal PathSetAccessControlResult() { }
+ }
+}
+#endregion class PathSetAccessControlResult
+
+#region class PathUpdateResult
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Path UpdateResult
+ ///
+ internal partial class PathUpdateResult
+ {
+ ///
+ /// An MD5 hash of the request content. This header is only returned for "Flush" operation. This header is returned so that the client can check for message content integrity. This header refers to the content of the request, not actual file content.
+ ///
+ public string ContentMD5 { get; internal set; }
+
+ ///
+ /// An HTTP entity tag associated with the file or directory.
+ ///
+ public Azure.ETag ETag { get; internal set; }
+
+ ///
+ /// The date and time the file or directory was last modified. Write operations on the file or directory update the last modified time.
+ ///
+ public System.DateTimeOffset LastModified { get; internal set; }
+
+ ///
+ /// Indicates that the service supports requests for partial file content.
+ ///
+ public string AcceptRanges { get; internal set; }
+
+ ///
+ /// If the Cache-Control request header has previously been set for the resource, that value is returned in this header.
+ ///
+ public string CacheControl { get; internal set; }
+
+ ///
+ /// If the Content-Disposition request header has previously been set for the resource, that value is returned in this header.
+ ///
+ public string ContentDisposition { get; internal set; }
+
+ ///
+ /// If the Content-Encoding request header has previously been set for the resource, that value is returned in this header.
+ ///
+ public string ContentEncoding { get; internal set; }
+
+ ///
+ /// If the Content-Language request header has previously been set for the resource, that value is returned in this header.
+ ///
+ public string ContentLanguage { get; internal set; }
+
+ ///
+ /// The size of the resource in bytes.
+ ///
+ public long ContentLength { get; internal set; }
+
+ ///
+ /// Indicates the range of bytes returned in the event that the client requested a subset of the file by setting the Range request header.
+ ///
+ public string ContentRange { get; internal set; }
+
+ ///
+ /// The content type specified for the resource. If no content type was specified, the default content type is application/octet-stream.
+ ///
+ public string ContentType { get; internal set; }
+
+ ///
+ /// User-defined properties associated with the file or directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded string. Note that the string may only contain ASCII characters in the ISO-8859-1 character set.
+ ///
+ public string Properties { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of PathUpdateResult instances.
+ /// You can use DataLakeModelFactory.PathUpdateResult instead.
+ ///
+ internal PathUpdateResult() { }
+ }
+}
+#endregion class PathUpdateResult
+
+#region enum PathGetPropertiesAction
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Optional. If the value is "getStatus" only the system defined properties for the path are returned. If the value is "getAccessControl" the access control list is returned in the response headers (Hierarchical Namespace must be enabled for the account), otherwise the properties are returned.
+ ///
+ public enum PathGetPropertiesAction
+ {
+ ///
+ /// getAccessControl
+ ///
+ GetAccessControl,
+
+ ///
+ /// getStatus
+ ///
+ GetStatus
+ }
+}
+
+namespace Azure.Storage.Files.DataLake
+{
+ internal static partial class DataLakeRestClient
+ {
+ public static partial class Serialization
+ {
+ public static string ToString(Azure.Storage.Files.DataLake.Models.PathGetPropertiesAction value)
+ {
+ return value switch
+ {
+ Azure.Storage.Files.DataLake.Models.PathGetPropertiesAction.GetAccessControl => "getAccessControl",
+ Azure.Storage.Files.DataLake.Models.PathGetPropertiesAction.GetStatus => "getStatus",
+ _ => throw new System.ArgumentOutOfRangeException(nameof(value), value, "Unknown Azure.Storage.Files.DataLake.Models.PathGetPropertiesAction value.")
+ };
+ }
+
+ public static Azure.Storage.Files.DataLake.Models.PathGetPropertiesAction ParsePathGetPropertiesAction(string value)
+ {
+ return value switch
+ {
+ "getAccessControl" => Azure.Storage.Files.DataLake.Models.PathGetPropertiesAction.GetAccessControl,
+ "getStatus" => Azure.Storage.Files.DataLake.Models.PathGetPropertiesAction.GetStatus,
+ _ => throw new System.ArgumentOutOfRangeException(nameof(value), value, "Unknown Azure.Storage.Files.DataLake.Models.PathGetPropertiesAction value.")
+ };
+ }
+ }
+ }
+}
+#endregion enum PathGetPropertiesAction
+
+#region enum PathLeaseAction
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// There are five lease actions: "acquire", "break", "change", "renew", and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration" to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the lease break period is allowed to elapse, during which time no lease operation except break and release can be performed on the file. When a lease is successfully broken, the response indicates the interval in seconds until a new lease can be acquired. Use "change" and specify the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease.
+ ///
+ public enum PathLeaseAction
+ {
+ ///
+ /// acquire
+ ///
+ Acquire,
+
+ ///
+ /// break
+ ///
+ Break,
+
+ ///
+ /// change
+ ///
+ Change,
+
+ ///
+ /// renew
+ ///
+ Renew,
+
+ ///
+ /// release
+ ///
+ Release
+ }
+}
+
+namespace Azure.Storage.Files.DataLake
+{
+ internal static partial class DataLakeRestClient
+ {
+ public static partial class Serialization
+ {
+ public static string ToString(Azure.Storage.Files.DataLake.Models.PathLeaseAction value)
+ {
+ return value switch
+ {
+ Azure.Storage.Files.DataLake.Models.PathLeaseAction.Acquire => "acquire",
+ Azure.Storage.Files.DataLake.Models.PathLeaseAction.Break => "break",
+ Azure.Storage.Files.DataLake.Models.PathLeaseAction.Change => "change",
+ Azure.Storage.Files.DataLake.Models.PathLeaseAction.Renew => "renew",
+ Azure.Storage.Files.DataLake.Models.PathLeaseAction.Release => "release",
+ _ => throw new System.ArgumentOutOfRangeException(nameof(value), value, "Unknown Azure.Storage.Files.DataLake.Models.PathLeaseAction value.")
+ };
+ }
+
+ public static Azure.Storage.Files.DataLake.Models.PathLeaseAction ParsePathLeaseAction(string value)
+ {
+ return value switch
+ {
+ "acquire" => Azure.Storage.Files.DataLake.Models.PathLeaseAction.Acquire,
+ "break" => Azure.Storage.Files.DataLake.Models.PathLeaseAction.Break,
+ "change" => Azure.Storage.Files.DataLake.Models.PathLeaseAction.Change,
+ "renew" => Azure.Storage.Files.DataLake.Models.PathLeaseAction.Renew,
+ "release" => Azure.Storage.Files.DataLake.Models.PathLeaseAction.Release,
+ _ => throw new System.ArgumentOutOfRangeException(nameof(value), value, "Unknown Azure.Storage.Files.DataLake.Models.PathLeaseAction value.")
+ };
+ }
+ }
+ }
+}
+#endregion enum PathLeaseAction
+
+#region class PathList
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// PathList
+ ///
+ internal partial class PathList
+ {
+ ///
+ /// paths
+ ///
+ public System.Collections.Generic.IEnumerable Paths { get; internal set; }
+
+ ///
+ /// Creates a new PathList instance
+ ///
+ public PathList()
+ {
+ Paths = new System.Collections.Generic.List();
+ }
+ }
+}
+#endregion class PathList
+
+#region enum PathRenameMode
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Optional. Valid only when namespace is enabled. This parameter determines the behavior of the rename operation. The value must be "legacy" or "posix", and the default value will be "posix".
+ ///
+ public enum PathRenameMode
+ {
+ ///
+ /// legacy
+ ///
+ Legacy,
+
+ ///
+ /// posix
+ ///
+ Posix
+ }
+}
+
+namespace Azure.Storage.Files.DataLake
+{
+ internal static partial class DataLakeRestClient
+ {
+ public static partial class Serialization
+ {
+ public static string ToString(Azure.Storage.Files.DataLake.Models.PathRenameMode value)
+ {
+ return value switch
+ {
+ Azure.Storage.Files.DataLake.Models.PathRenameMode.Legacy => "legacy",
+ Azure.Storage.Files.DataLake.Models.PathRenameMode.Posix => "posix",
+ _ => throw new System.ArgumentOutOfRangeException(nameof(value), value, "Unknown Azure.Storage.Files.DataLake.Models.PathRenameMode value.")
+ };
+ }
+
+ public static Azure.Storage.Files.DataLake.Models.PathRenameMode ParsePathRenameMode(string value)
+ {
+ return value switch
+ {
+ "legacy" => Azure.Storage.Files.DataLake.Models.PathRenameMode.Legacy,
+ "posix" => Azure.Storage.Files.DataLake.Models.PathRenameMode.Posix,
+ _ => throw new System.ArgumentOutOfRangeException(nameof(value), value, "Unknown Azure.Storage.Files.DataLake.Models.PathRenameMode value.")
+ };
+ }
+ }
+ }
+}
+#endregion enum PathRenameMode
+
+#region enum PathResourceType
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Required only for Create File and Create Directory. The value must be "file" or "directory".
+ ///
+ public enum PathResourceType
+ {
+ ///
+ /// directory
+ ///
+ Directory,
+
+ ///
+ /// file
+ ///
+ File
+ }
+}
+
+namespace Azure.Storage.Files.DataLake
+{
+ internal static partial class DataLakeRestClient
+ {
+ public static partial class Serialization
+ {
+ public static string ToString(Azure.Storage.Files.DataLake.Models.PathResourceType value)
+ {
+ return value switch
+ {
+ Azure.Storage.Files.DataLake.Models.PathResourceType.Directory => "directory",
+ Azure.Storage.Files.DataLake.Models.PathResourceType.File => "file",
+ _ => throw new System.ArgumentOutOfRangeException(nameof(value), value, "Unknown Azure.Storage.Files.DataLake.Models.PathResourceType value.")
+ };
+ }
+
+ public static Azure.Storage.Files.DataLake.Models.PathResourceType ParsePathResourceType(string value)
+ {
+ return value switch
+ {
+ "directory" => Azure.Storage.Files.DataLake.Models.PathResourceType.Directory,
+ "file" => Azure.Storage.Files.DataLake.Models.PathResourceType.File,
+ _ => throw new System.ArgumentOutOfRangeException(nameof(value), value, "Unknown Azure.Storage.Files.DataLake.Models.PathResourceType value.")
+ };
+ }
+ }
+ }
+}
+#endregion enum PathResourceType
+
+#region enum PathUpdateAction
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// The action must be "append" to upload data to be appended to a file, "flush" to flush previously uploaded data to a file, "setProperties" to set the properties of a file or directory, or "setAccessControl" to set the owner, group, permissions, or access control list for a file or directory. Note that Hierarchical Namespace must be enabled for the account in order to use access control. Also note that the Access Control List (ACL) includes permissions for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers are mutually exclusive.
+ ///
+ public enum PathUpdateAction
+ {
+ ///
+ /// append
+ ///
+ Append,
+
+ ///
+ /// flush
+ ///
+ Flush,
+
+ ///
+ /// setProperties
+ ///
+ SetProperties,
+
+ ///
+ /// setAccessControl
+ ///
+ SetAccessControl
+ }
+}
+
+namespace Azure.Storage.Files.DataLake
+{
+ internal static partial class DataLakeRestClient
+ {
+ public static partial class Serialization
+ {
+ public static string ToString(Azure.Storage.Files.DataLake.Models.PathUpdateAction value)
+ {
+ return value switch
+ {
+ Azure.Storage.Files.DataLake.Models.PathUpdateAction.Append => "append",
+ Azure.Storage.Files.DataLake.Models.PathUpdateAction.Flush => "flush",
+ Azure.Storage.Files.DataLake.Models.PathUpdateAction.SetProperties => "setProperties",
+ Azure.Storage.Files.DataLake.Models.PathUpdateAction.SetAccessControl => "setAccessControl",
+ _ => throw new System.ArgumentOutOfRangeException(nameof(value), value, "Unknown Azure.Storage.Files.DataLake.Models.PathUpdateAction value.")
+ };
+ }
+
+ public static Azure.Storage.Files.DataLake.Models.PathUpdateAction ParsePathUpdateAction(string value)
+ {
+ return value switch
+ {
+ "append" => Azure.Storage.Files.DataLake.Models.PathUpdateAction.Append,
+ "flush" => Azure.Storage.Files.DataLake.Models.PathUpdateAction.Flush,
+ "setProperties" => Azure.Storage.Files.DataLake.Models.PathUpdateAction.SetProperties,
+ "setAccessControl" => Azure.Storage.Files.DataLake.Models.PathUpdateAction.SetAccessControl,
+ _ => throw new System.ArgumentOutOfRangeException(nameof(value), value, "Unknown Azure.Storage.Files.DataLake.Models.PathUpdateAction value.")
+ };
+ }
+ }
+ }
+}
+#endregion enum PathUpdateAction
+
+#region class ServiceListFileSystemsResult
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Service ListFileSystemsResult
+ ///
+ internal partial class ServiceListFileSystemsResult
+ {
+ ///
+ /// If the number of filesystems to be listed exceeds the maxResults limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the list operation to continue listing the filesystems.
+ ///
+ public string Continuation { get; internal set; }
+
+ ///
+ /// The content type of list filesystem response. The default content type is application/json.
+ ///
+ public string ContentType { get; internal set; }
+
+ ///
+ /// Body
+ ///
+ public Azure.Storage.Files.DataLake.Models.FileSystemList Body { get; internal set; }
+
+ ///
+ /// Creates a new ServiceListFileSystemsResult instance
+ ///
+ public ServiceListFileSystemsResult()
+ {
+ Body = new Azure.Storage.Files.DataLake.Models.FileSystemList();
+ }
+ }
+}
+#endregion class ServiceListFileSystemsResult
+#endregion Models
+
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/LeaseExtensions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/LeaseExtensions.cs
new file mode 100644
index 000000000000..a6fc119c11d4
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/LeaseExtensions.cs
@@ -0,0 +1,19 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using Azure.Storage.Files.DataLake.Models;
+
+namespace Azure.Storage.Files.DataLake
+{
+ internal static class LeaseExtensions
+ {
+ internal static DataLakeLease ToDataLakeLease(this Blobs.Models.BlobLease blobLease) =>
+ new DataLakeLease()
+ {
+ ETag = blobLease.ETag,
+ LastModified = blobLease.LastModified,
+ LeaseId = blobLease.LeaseId,
+ LeaseTime = blobLease.LeaseTime
+ };
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/CopyStatus.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/CopyStatus.cs
new file mode 100644
index 000000000000..f35abcdb6005
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/CopyStatus.cs
@@ -0,0 +1,33 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// CopyStatus values
+ ///
+#pragma warning disable CA1717 // Only FlagsAttribute enums should have plural names
+ public enum CopyStatus
+#pragma warning restore CA1717 // Only FlagsAttribute enums should have plural names
+ {
+ ///
+ /// pending
+ ///
+ Pending,
+
+ ///
+ /// success
+ ///
+ Success,
+
+ ///
+ /// aborted
+ ///
+ Aborted,
+
+ ///
+ /// failed
+ ///
+ Failed
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/CustomerProvidedKey.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/CustomerProvidedKey.cs
new file mode 100644
index 000000000000..109139409d88
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/CustomerProvidedKey.cs
@@ -0,0 +1,103 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Security.Cryptography;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Wrapper for an encryption key to be used with client provided key server-side encryption.
+ ///
+ public readonly struct CustomerProvidedKey : IEquatable
+ {
+ ///
+ /// Base64 encoded string of the AES256 encryption key.
+ ///
+ public readonly string EncryptionKey { get; }
+
+ ///
+ /// Base64 encoded string of the AES256 encryption key's SHA256 hash.
+ ///
+ public readonly string EncryptionKeyHash { get; }
+
+ ///
+ /// The algorithm for Azure Blob Storage to encrypt with.
+ /// Azure Blob Storage only offers AES256 encryption.
+ ///
+ public readonly EncryptionAlgorithmType EncryptionAlgorithm { get; }
+
+ ///
+ /// Creates a new CustomerProvidedKey for use in server-side encryption.
+ ///
+ /// The encryption key encoded as a base64 string.
+ public CustomerProvidedKey(string key)
+ {
+ EncryptionKey = key;
+ EncryptionAlgorithm = EncryptionAlgorithmType.AES256;
+ using var sha256 = SHA256.Create();
+ var encodedHash = sha256.ComputeHash(Convert.FromBase64String(key));
+ EncryptionKeyHash = Convert.ToBase64String(encodedHash);
+ }
+
+ ///
+ /// Creates a new CustomerProvidedKey for use in server-side encryption.
+ ///
+ /// The encryption key bytes.
+ public CustomerProvidedKey(byte[] key) : this(Convert.ToBase64String(key)) { }
+
+ ///
+ /// Checks if two CustomerProvidedKey instances are equal to each other.
+ ///
+ /// The other instance to compare to.
+ public override bool Equals(object obj)
+ => obj is CustomerProvidedKey other && Equals(other);
+
+ ///
+ /// Get a hash code for the CustomerProvidedKey.
+ ///
+ /// Hash code for the CustomerProvidedKey.
+ public override int GetHashCode()
+ => EncryptionKey.GetHashCode()
+ ^ EncryptionKeyHash.GetHashCode()
+ ^ EncryptionAlgorithm.GetHashCode()
+ ;
+
+ ///
+ /// Check if two CustomerProvidedKey instances are equal.
+ ///
+ /// The first instance to compare.
+ /// The second instance to compare.
+ /// True if they're equal, false otherwise.
+ public static bool operator ==(CustomerProvidedKey left, CustomerProvidedKey right) => left.Equals(right);
+
+ ///
+ /// Check if two CustomerProvidedKey instances are not equal.
+ ///
+ /// The first instance to compare.
+ /// The second instance to compare.
+ /// True if they're not equal, false otherwise.
+ public static bool operator !=(CustomerProvidedKey left, CustomerProvidedKey right) => !(left == right);
+
+ ///
+ /// Checks if two CustomerProvidedKey instances are equal to each other.
+ ///
+ /// The other instance to compare to.
+ ///
+ public bool Equals(CustomerProvidedKey other)
+ => EncryptionKey == other.EncryptionKey
+ && EncryptionKeyHash == other.EncryptionKeyHash
+ && EncryptionAlgorithm == other.EncryptionAlgorithm
+ ;
+
+ ///
+ /// ToString
+ ///
+ /// string
+ public override string ToString()
+ => $"[{nameof(CustomerProvidedKey)}:{nameof(EncryptionKey)}={EncryptionKey};{nameof(EncryptionKeyHash)}={EncryptionKeyHash};{nameof(EncryptionAlgorithm)}={EncryptionAlgorithm}]";
+
+ internal Blobs.Models.CustomerProvidedKey ToBlobCustomerProvidedKey() =>
+ new Blobs.Models.CustomerProvidedKey(EncryptionKey);
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeLease.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeLease.cs
new file mode 100644
index 000000000000..09a21c6d063d
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeLease.cs
@@ -0,0 +1,40 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Lease
+ ///
+ public class DataLakeLease
+ {
+ ///
+ /// The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes.
+ ///
+ public ETag ETag { get; internal set; }
+
+ ///
+ /// Returns the date and time the container was last modified. Any operation that modifies the blob, including an update of the blob's metadata or properties, changes the last-modified time of the blob.
+ ///
+ public DateTimeOffset LastModified { get; internal set; }
+
+ ///
+ /// Uniquely identifies a container's or blob's lease
+ ///
+ public string LeaseId { get; internal set; }
+
+ ///
+ /// Gets the approximate time remaining in the lease period, in
+ /// seconds. This is only provided when breaking a lease.
+ ///
+ public int? LeaseTime { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of Lease instances.
+ /// You can use DataLakeModelFactory.Lease instead.
+ ///
+ internal DataLakeLease() { }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeModelFactory.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeModelFactory.cs
new file mode 100644
index 000000000000..cfb62d9c8768
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeModelFactory.cs
@@ -0,0 +1,367 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.IO;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// DataLakeModelFactory provides utilities for mocking.
+ ///
+ public static partial class DataLakeModelFactory
+ {
+ #region FileDownloadDetails
+ ///
+ /// Creates a new FileDownloadDetails instance for mocking.
+ ///
+ public static FileDownloadDetails FileDownloadDetails(
+ DateTimeOffset lastModified,
+ IDictionary metadata,
+ string contentRange,
+ ETag eTag,
+ string contentEncoding,
+ string cacheControl,
+ string contentDisposition,
+ string contentLanguage,
+ DateTimeOffset copyCompletionTime,
+ string copyStatusDescription,
+ string copyId,
+ string copyProgress,
+ Uri copySource,
+ CopyStatus copyStatus,
+ LeaseDurationType leaseDuration,
+ LeaseState leaseState,
+ LeaseStatus leaseStatus,
+ string acceptRanges,
+ bool isServerEncrypted,
+ string encryptionKeySha256,
+ byte[] contentHash)
+ => new FileDownloadDetails()
+ {
+ LastModified = lastModified,
+ Metadata = metadata,
+ ContentRange = contentRange,
+ ETag = eTag,
+ ContentEncoding = contentEncoding,
+ CacheControl = cacheControl,
+ ContentDisposition = contentDisposition,
+ ContentLanguage = contentLanguage,
+ CopyCompletedOn = copyCompletionTime,
+ CopyStatusDescription = copyStatusDescription,
+ CopyId = copyId,
+ CopyProgress = copyProgress,
+ CopySource = copySource,
+ CopyStatus = copyStatus,
+ LeaseDuration = leaseDuration,
+ LeaseState = leaseState,
+ LeaseStatus = leaseStatus,
+ AcceptRanges = acceptRanges,
+ IsServerEncrypted = isServerEncrypted,
+ EncryptionKeySha256 = encryptionKeySha256,
+ ContentHash = contentHash
+ };
+ #endregion FileDownloadDetails
+
+ #region FileDownloadInfo
+ ///
+ /// Creates a new FileDownloadInfo instance for mocking.
+ ///
+ public static FileDownloadInfo FileDownloadInfo(
+ long contentLength,
+ Stream content,
+ byte[] contentHash,
+ FileDownloadDetails properties)
+ => new FileDownloadInfo()
+ {
+ ContentLength = contentLength,
+ Content = content,
+ ContentHash = contentHash,
+ Properties = properties
+ };
+ #endregion FileDownloadInfo
+
+ #region FileSystemInfo
+ ///
+ /// Creates a new FileSystemInfo instance for mocking.
+ ///
+ public static FileSystemInfo FileSystemInfo(
+ ETag etag,
+ DateTimeOffset lastModified)
+ => new FileSystemInfo()
+ {
+ ETag = etag,
+ LastModified = lastModified
+ };
+ #endregion FileSystemInfo
+
+ #region FileSystemItem
+ ///
+ /// Creates a new FileSystemItem instance for mocking.
+ ///
+ public static FileSystemItem FileSystemItem(
+ string name,
+ FileSystemProperties properties,
+ IDictionary metadata)
+ => new FileSystemItem()
+ {
+ Name = name,
+ Properties = properties,
+ Metadata = metadata
+ };
+ #endregion FileSystemItem
+
+ #region FileSystemProperties
+ ///
+ /// Creates a new FileSystemProperties instance for mocking.
+ ///
+ public static FileSystemProperties FileSystemProperties(
+ DateTimeOffset lastModified,
+ LeaseStatus? leaseStatus,
+ LeaseState? leaseState,
+ LeaseDurationType? leaseDuration,
+ PublicAccessType? publicAccess,
+ bool? hasImmutabilityPolicy,
+ bool? hasLegalHold,
+ ETag eTag)
+ => new FileSystemProperties()
+ {
+ LastModified = lastModified,
+ LeaseStatus = leaseStatus,
+ LeaseState = leaseState,
+ LeaseDuration = leaseDuration,
+ PublicAccess = publicAccess,
+ HasImmutabilityPolicy = hasImmutabilityPolicy,
+ HasLegalHold = hasLegalHold,
+ ETag = eTag
+ };
+ #endregion FileSystemProperties
+
+ #region Lease
+ ///
+ /// Creates a new Lease instance for mocking.
+ ///
+ public static DataLakeLease Lease(
+ ETag eTag,
+ DateTimeOffset lastModified,
+ string leaseId,
+ int? leaseTime)
+ => new DataLakeLease()
+ {
+ ETag = eTag,
+ LastModified = lastModified,
+ LeaseId = leaseId,
+ LeaseTime = leaseTime
+ };
+ #endregion Lease
+
+ #region PathAccessControl
+ ///
+ /// Creates a new PathAccessControl instance for mocking.
+ ///
+ public static PathAccessControl PathAccessControl(
+ string owner,
+ string group,
+ string permissions,
+ string acl)
+ => new PathAccessControl()
+ {
+ Owner = owner,
+ Group = group,
+ Permissions = permissions,
+ Acl = acl
+ };
+ #endregion PathAccessControl
+
+ #region PathContentInfo
+ ///
+ /// Creates a new PathContentInfo instance for mocking.
+ ///
+ public static PathContentInfo PathContentInfo(
+ string contentHash,
+ ETag eTag,
+ DateTimeOffset lastModified,
+ string acceptRanges,
+ string cacheControl,
+ string contentDisposition,
+ string contentEncoding,
+ string contentLanguage,
+ long contentLength,
+ string contentRange,
+ string contentType,
+ IDictionary metadata)
+ => new PathContentInfo()
+ {
+ ContentHash = contentHash,
+ ETag = eTag,
+ LastModified = lastModified,
+ AcceptRanges = acceptRanges,
+ CacheControl = cacheControl,
+ ContentDisposition = contentDisposition,
+ ContentEncoding = contentEncoding,
+ ContentLanguage = contentLanguage,
+ ContentLength = contentLength,
+ ContentRange = contentRange,
+ ContentType = contentType,
+ Metadata = metadata
+ };
+ #endregion PathContentInfo
+
+ #region PathCreateInfo
+ ///
+ /// Creates a new PathCreateInfo instance for mocking.
+ ///
+ public static PathCreateInfo PathCreateInfo(
+ PathInfo pathInfo,
+ string continuation)
+ => new PathCreateInfo()
+ {
+ PathInfo = pathInfo,
+ Continuation = continuation
+ };
+ #endregion PathCreateInfo
+
+ #region PathInfo
+ ///
+ /// Creates a new PathInfo instance for mocking.
+ ///
+ public static PathInfo PathInfo(
+ ETag eTag,
+ DateTimeOffset lastModified)
+ => new PathInfo()
+ {
+ ETag = eTag,
+ LastModified = lastModified
+ };
+ #endregion PathInfo
+
+ #region PathItem
+ ///
+ /// Creates a new PathItem instance for mocking.
+ ///
+ public static PathItem PathItem(
+ string name,
+ bool? isDirectory,
+ DateTimeOffset lastModified,
+ ETag eTag,
+ long? contentLength,
+ string owner,
+ string group,
+ string permissions)
+ => new PathItem()
+ {
+ Name = name,
+ IsDirectory = isDirectory,
+ LastModified = lastModified,
+ ETag = eTag,
+ ContentLength = contentLength,
+ Owner = owner,
+ Group = group,
+ Permissions = permissions
+ };
+ #endregion PathItem
+
+ #region PathProperties
+ ///
+ /// Creates a new PathProperties instance for mocking.
+ ///
+ public static PathProperties PathProperties(
+ DateTimeOffset lastModified,
+ DateTimeOffset creationTime,
+ IDictionary metadata,
+ DateTimeOffset copyCompletionTime,
+ string copyStatusDescription,
+ string copyId,
+ string copyProgress,
+ Uri copySource,
+ CopyStatus copyStatus,
+ bool isIncrementalCopy,
+ LeaseDurationType leaseDuration,
+ LeaseState leaseState,
+ LeaseStatus leaseStatus,
+ long contentLength,
+ string contentType,
+ ETag eTag,
+ byte[] contentHash,
+ IEnumerable contentEncoding,
+ string contentDisposition,
+ IEnumerable contentLanguage,
+ string cacheControl,
+ string acceptRanges,
+ bool isServerEncrypted,
+ string encryptionKeySha256,
+ string accessTier,
+ string archiveStatus,
+ DateTimeOffset accessTierChangeTime)
+ => new PathProperties()
+ {
+ LastModified = lastModified,
+ CreatedOn = creationTime,
+ Metadata = metadata,
+ CopyCompletedOn = copyCompletionTime,
+ CopyStatusDescription = copyStatusDescription,
+ CopyId = copyId,
+ CopyProgress = copyProgress,
+ CopySource = copySource,
+ CopyStatus = copyStatus,
+ IsIncrementalCopy = isIncrementalCopy,
+ LeaseDuration = leaseDuration,
+ LeaseState = leaseState,
+ LeaseStatus = leaseStatus,
+ ContentLength = contentLength,
+ ContentType = contentType,
+ ETag = eTag,
+ ContentHash = contentHash,
+ ContentEncoding = contentEncoding,
+ ContentDisposition = contentDisposition,
+ ContentLanguage = contentLanguage,
+ CacheControl = cacheControl,
+ AcceptRanges = acceptRanges,
+ IsServerEncrypted = isServerEncrypted,
+ EncryptionKeySha256 = encryptionKeySha256,
+ AccessTier = accessTier,
+ ArchiveStatus = archiveStatus,
+ AccessTierChangedOn = accessTierChangeTime
+ };
+ #endregion PathProperties
+
+ #region PathSegment
+ ///
+ /// Creates a new PathSegment instance for mocking.
+ ///
+ public static PathSegment PathSegment(
+ string continuation,
+ IEnumerable paths)
+ => new PathSegment()
+ {
+ Continuation = continuation,
+ Paths = paths
+ };
+ #endregion PathSegment
+
+ #region UserDelegationKey
+ ///
+ /// Creates a new UserDelegationKey instance for mocking.
+ ///
+ public static UserDelegationKey UserDelegationKey(
+ string signedObjectId,
+ string signedTenantId,
+ DateTimeOffset signedStart,
+ DateTimeOffset signedExpiry,
+ string signedService,
+ string signedVersion,
+ string value)
+ => new UserDelegationKey()
+ {
+ SignedObjectId = signedObjectId,
+ SignedTenantId = signedTenantId,
+ SignedStartsOn = signedStart,
+ SignedExpiresOn = signedExpiry,
+ SignedService = signedService,
+ SignedVersion = signedVersion,
+ Value = value
+ };
+ #endregion UserDelegationKey
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeRequestConditions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeRequestConditions.cs
new file mode 100644
index 000000000000..88a0fa8cd5e8
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/DataLakeRequestConditions.cs
@@ -0,0 +1,12 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using Azure.Storage.Blobs.Models;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Specifies file system specific access conditions.
+ ///
+ public class DataLakeRequestConditions : BlobRequestConditions { }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/EncryptionAlgorithmType.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/EncryptionAlgorithmType.cs
new file mode 100644
index 000000000000..2ba446b6b0a4
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/EncryptionAlgorithmType.cs
@@ -0,0 +1,17 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256".
+ /// Must be provided if the x-ms-encryption-key header is provided.
+ ///
+ public enum EncryptionAlgorithmType
+ {
+ ///
+ /// AES256
+ ///
+ AES256
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileDownloadDetails.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileDownloadDetails.cs
new file mode 100644
index 000000000000..d97c8fd4b456
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileDownloadDetails.cs
@@ -0,0 +1,152 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Properties returned when downloading a File
+ ///
+ public class FileDownloadDetails
+ {
+ ///
+ /// Returns the date and time the path was last modified. Any operation that modifies the file,
+ /// including an update of the file's metadata or properties, changes the last-modified time of the file.
+ ///
+ public DateTimeOffset LastModified { get; internal set; }
+
+ ///
+ /// Path metadata.
+ ///
+ public IDictionary Metadata { get; internal set; }
+
+ ///
+ /// Indicates the range of bytes returned in the event that the client requested a subset of the file
+ /// setting the 'Range' request header.
+ ///
+ public string ContentRange { get; internal set; }
+
+ ///
+ /// The ETag contains a value that you can use to perform operations conditionally.
+ /// If the request version is 2011-08-18 or newer, the ETag value will be in quotes.
+ ///
+ public ETag ETag { get; internal set; }
+
+ ///
+ /// This header returns the value that was specified for the Content-Encoding request header
+ ///
+ public string ContentEncoding { get; internal set; }
+
+ ///
+ /// This header is returned if it was previously specified for the file.
+ ///
+ public string CacheControl { get; internal set; }
+
+ ///
+ /// This header returns the value that was specified for the 'x-ms-blob-content-disposition' header.
+ /// The Content-Disposition response header field conveys additional information about how to process the response payload,
+ /// and also can be used to attach additional metadata. For example, if set to attachment, it indicates that the user-agent
+ /// should not display the response, but instead show a Save As dialog with a filename other than the file name specified.
+ ///
+ public string ContentDisposition { get; internal set; }
+
+ ///
+ /// This header returns the value that was specified for the Content-Language request header.
+ ///
+ public string ContentLanguage { get; internal set; }
+
+ ///
+ /// Conclusion time of the last attempted Copy Blob operation where this file was the destination file.
+ /// This value can specify the time of a completed, aborted, or failed copy attempt. This header does not
+ /// appear if a copy is pending, if this blob has never been the destination in a Copy Blob operation, or
+ /// if this blob has been modified after a concluded Copy Blob operation using Set Blob Properties,
+ /// Put Blob, or Put Block List.
+ ///
+ public DateTimeOffset CopyCompletedOn { get; internal set; }
+
+ ///
+ /// Only appears when x-ms-copy-status is failed or pending. Describes the cause of the last fatal or
+ /// non-fatal copy operation failure. This header does not appear if this blob has never been the destination
+ /// in a Copy Blob operation, or if this blob has been modified after a concluded Copy Blob operation using
+ /// Set Blob Properties, Put Blob, or Put Block List
+ ///
+ public string CopyStatusDescription { get; internal set; }
+
+ ///
+ /// String identifier for this copy operation. Use with Get Blob Properties to check the status of this copy
+ /// operation, or pass to Abort Copy Blob to abort a pending copy.
+ ///
+ public string CopyId { get; internal set; }
+
+ ///
+ /// Contains the number of bytes copied and the total bytes in the source in the last attempted Copy Blob operation
+ /// where this blob was the destination blob. Can show between 0 and Content-Length bytes copied. This header does not
+ /// appear if this blob has never been the destination in a Copy Blob operation, or if this blob has been modified after
+ /// a concluded Copy Blob operation using Set Blob Properties, Put Blob, or Put Block List
+ ///
+ public string CopyProgress { get; internal set; }
+
+ ///
+ /// URL up to 2 KB in length that specifies the source blob or file used in the last attempted Copy Blob
+ /// operation where this blob was the destination blob. This header does not appear if this blob has never
+ /// been the destination in a Copy Blob operation, or if this blob has been modified after a concluded Copy
+ /// Blob operation using Set Blob Properties, Put Blob, or Put Block List.
+ ///
+ public Uri CopySource { get; internal set; }
+
+ ///
+ /// State of the copy operation identified by x-ms-copy-id.
+ ///
+ public CopyStatus CopyStatus { get; internal set; }
+
+ ///
+ /// When a file is leased, specifies whether the lease is of infinite or fixed duration.
+ ///
+ public LeaseDurationType LeaseDuration { get; internal set; }
+
+ ///
+ /// Lease state of the file.
+ ///
+ public LeaseState LeaseState { get; internal set; }
+
+ ///
+ /// The current lease status of the file.
+ ///
+ public LeaseStatus LeaseStatus { get; internal set; }
+
+ ///
+ /// Indicates that the service supports requests for partial file content.
+ ///
+ public string AcceptRanges { get; internal set; }
+
+ ///
+ /// The value of this header is set to true if the file data and application metadata are completely
+ /// encrypted using the specified algorithm. Otherwise, the value is set to false (when the file is
+ /// unencrypted, or if only parts of the file/application metadata are encrypted).
+ ///
+ public bool IsServerEncrypted { get; internal set; }
+
+ ///
+ /// The SHA-256 hash of the encryption key used to encrypt the file. This header is only returned when
+ /// the file was encrypted with a customer-provided key.
+ ///
+ public string EncryptionKeySha256 { get; internal set; }
+
+ ///
+ /// If the file has a MD5 hash, and if request contains range header (Range or x-ms-range), this response
+ /// header is returned with the value of the whole file's MD5 value. This value may or may not be equal
+ /// to the value returned in Content-MD5 header, with the latter calculated from the requested range
+ ///
+#pragma warning disable CA1819 // Properties should not return arrays
+ public byte[] ContentHash { get; internal set; }
+#pragma warning restore CA1819 // Properties should not return arrays
+
+ ///
+ /// Prevent direct instantiation of FileDownloadDetails instances.
+ /// You can use DataLakeModelFactory.FileDownloadDetails instead.
+ ///
+ internal FileDownloadDetails() { }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileDownloadInfo.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileDownloadInfo.cs
new file mode 100644
index 000000000000..080a1538b92a
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileDownloadInfo.cs
@@ -0,0 +1,42 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System.IO;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// The properties and Content returned from downloading a file
+ ///
+ public class FileDownloadInfo
+ {
+ ///
+ /// The number of bytes present in the response body.
+ ///
+ public long ContentLength { get; internal set; }
+
+ ///
+ /// Content
+ ///
+ public Stream Content { get; internal set; }
+
+ ///
+ /// If the file has an MD5 hash and this operation is to read the full file,
+ /// this response header is returned so that the client can check for message content integrity.
+ ///
+#pragma warning disable CA1819 // Properties should not return arrays
+ public byte[] ContentHash { get; internal set; }
+#pragma warning restore CA1819 // Properties should not return arrays
+
+ ///
+ /// Properties returned when downloading a File
+ ///
+ public FileDownloadDetails Properties { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of FileDownloadInfo instances.
+ /// You can use DataLakeModelFactory.FileDownloadInfo instead.
+ ///
+ internal FileDownloadInfo() { }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileSystemInfo.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileSystemInfo.cs
new file mode 100644
index 000000000000..6054bf4c364e
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileSystemInfo.cs
@@ -0,0 +1,19 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using Azure.Storage.Blobs.Models;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// FileSystemInfo
+ ///
+ public class FileSystemInfo : BlobContainerInfo
+ {
+ ///
+ /// Prevent direct instantiation of FileSystemInfo instances.
+ /// You can use DataLakeModelFactory.FileSystemInfo instead.
+ ///
+ internal FileSystemInfo() { }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileSystemItem.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileSystemItem.cs
new file mode 100644
index 000000000000..f75928d0f398
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileSystemItem.cs
@@ -0,0 +1,35 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System.Collections.Generic;
+using Azure.Storage.Blobs.Models;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// An Azure Data Lake file system
+ ///
+ public class FileSystemItem
+ {
+ ///
+ /// Name
+ ///
+ public string Name { get; internal set; }
+
+ ///
+ /// Properties of a file system.
+ ///
+ public FileSystemProperties Properties { get; internal set; }
+
+ ///
+ /// Metadata
+ ///
+ public IDictionary Metadata { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of FileSystemItem instances.
+ /// You can use DataLakeModelFactory.FileSystemItem instead.
+ ///
+ internal FileSystemItem() { }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileSystemProperties.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileSystemProperties.cs
new file mode 100644
index 000000000000..eca799204331
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileSystemProperties.cs
@@ -0,0 +1,59 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Properties of a file system.
+ ///
+ public class FileSystemProperties
+ {
+ ///
+ /// Last-Modified
+ ///
+ public DateTimeOffset LastModified { get; internal set; }
+
+ ///
+ /// LeaseStatus
+ ///
+ public LeaseStatus? LeaseStatus { get; internal set; }
+
+ ///
+ /// LeaseState
+ ///
+ public LeaseState? LeaseState { get; internal set; }
+
+ ///
+ /// LeaseDuration
+ ///
+ public LeaseDurationType? LeaseDuration { get; internal set; }
+
+ ///
+ /// PublicAccess
+ ///
+ public PublicAccessType? PublicAccess { get; internal set; }
+
+ ///
+ /// HasImmutabilityPolicy
+ ///
+ public bool? HasImmutabilityPolicy { get; internal set; }
+
+ ///
+ /// HasLegalHold
+ ///
+ public bool? HasLegalHold { get; internal set; }
+
+ ///
+ /// ETag
+ ///
+ public ETag ETag { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of FileSystemProperties instances.
+ /// You can use DataLakeModelFactory.FileSystemProperties instead.
+ ///
+ internal FileSystemProperties() { }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileSystemTraits.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileSystemTraits.cs
new file mode 100644
index 000000000000..eeccadbb52ed
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/FileSystemTraits.cs
@@ -0,0 +1,29 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Azure.Storage.Blobs.Models;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Specifies options for listing file systems with the
+ /// operation.
+ ///
+ [Flags]
+ public enum FileSystemTraits
+ {
+ ///
+ /// Default flag specifying that no flags are set in .
+ ///
+ None = 0,
+
+ ///
+ /// Flag specifying that the file system's metadata should
+ /// be included.
+ ///
+ Metadata = 1,
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/GetFileSystemsAsyncCollection.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/GetFileSystemsAsyncCollection.cs
new file mode 100644
index 000000000000..988fd6d37cc4
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/GetFileSystemsAsyncCollection.cs
@@ -0,0 +1,53 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+using Azure.Storage.Blobs;
+using Azure.Storage.Blobs.Models;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ internal class GetFileSystemsAsyncCollection : StorageCollectionEnumerator
+ {
+ private readonly BlobServiceClient _client;
+ private readonly FileSystemTraits _traits;
+ private readonly string _prefix;
+
+ public GetFileSystemsAsyncCollection(
+ BlobServiceClient client,
+ FileSystemTraits traits,
+ string prefix = default)
+ {
+ _client = client;
+ _traits = traits;
+ _prefix = prefix;
+ }
+
+ public override async ValueTask> GetNextPageAsync(
+ string continuationToken,
+ int? pageSizeHint,
+ bool isAsync,
+ CancellationToken cancellationToken)
+ {
+ Task> task = _client.GetBlobContainersInternal(
+ continuationToken,
+ (BlobContainerTraits)_traits,
+ _prefix,
+ pageSizeHint,
+ isAsync,
+ cancellationToken);
+ Response response = isAsync ?
+ await task.ConfigureAwait(false) :
+ task.EnsureCompleted();
+ return Page.FromValues(
+ response.Value.BlobContainerItems.Select(item => item.ToFileSystemItem()).ToArray(),
+ response.Value.NextMarker,
+ response.GetRawResponse());
+ }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/GetPathsAsyncCollection.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/GetPathsAsyncCollection.cs
new file mode 100644
index 000000000000..390dff2cc4c6
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/GetPathsAsyncCollection.cs
@@ -0,0 +1,45 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ internal class GetPathsAsyncCollection : StorageCollectionEnumerator
+ {
+ private readonly FileSystemClient _client;
+ private readonly GetPathsOptions? _options;
+
+ public GetPathsAsyncCollection(
+ FileSystemClient client,
+ GetPathsOptions? options)
+ {
+ _client = client;
+ _options = options;
+ }
+
+ public override async ValueTask> GetNextPageAsync(
+ string continuationToken,
+ int? pageSizeHint,
+ bool isAsync,
+ CancellationToken cancellationToken)
+ {
+ Task> task = _client.ListPathsInternal(
+ _options,
+ continuationToken,
+ pageSizeHint,
+ isAsync,
+ cancellationToken);
+ Response response = isAsync ?
+ await task.ConfigureAwait(false) :
+ task.EnsureCompleted();
+
+ return Page.FromValues(
+ response.Value.Paths.ToArray(),
+ response.Value.Continuation,
+ response.GetRawResponse());
+ }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/GetPathsOptions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/GetPathsOptions.cs
new file mode 100644
index 000000000000..3cc1a12f8dda
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/GetPathsOptions.cs
@@ -0,0 +1,82 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.ComponentModel;
+using System.Text;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Specifies options for listing paths.
+ ///
+ public struct GetPathsOptions : IEquatable
+ {
+ ///
+ /// Filters results to paths within the specified directory.
+ ///
+ public string Path { get; set; }
+
+ ///
+ /// Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+ /// "true", the user identity values returned in the owner and group fields of each list
+ /// entry will be transformed from Azure Active Directory Object IDs to User Principal
+ /// Names. If "false", the values will be returned as Azure Active Directory Object IDs.
+ /// The default value is false. Note that group and application Object IDs are not translated
+ /// because they do not have unique friendly names.
+ ///
+ public bool Upn { get; set; }
+
+ ///
+ /// If "true", all paths are listed; otherwise, only paths at the root of the filesystem are listed.
+ /// If "directory" is specified, the list will only include paths that share the same root.
+ ///
+ public bool Recursive { get; set; }
+
+ ///
+ /// Check if two GetPathsOptions instances are equal.
+ ///
+ /// The instance to compare to.
+ /// True if they're equal, false otherwise.
+ [EditorBrowsable(EditorBrowsableState.Never)]
+ public override bool Equals(object obj) =>
+ obj is GetPathsOptions other && Equals(other);
+
+ ///
+ /// Get a hash code for the GetPathsOptions.
+ ///
+ /// Hash code for the GetPathsOptions.
+ [EditorBrowsable(EditorBrowsableState.Never)]
+ public override int GetHashCode() =>
+ ((Upn ? 0b00001 : 0) ^
+ (Recursive ? 0b00010 : 0) ^ (Path?.GetHashCode() ?? 0));
+
+ ///
+ /// Check if two GetPathsOptions instances are equal.
+ ///
+ /// The first instance to compare.
+ /// The second instance to compare.
+ /// True if they're equal, false otherwise.
+ public static bool operator ==(GetPathsOptions left, GetPathsOptions right) =>
+ left.Equals(right);
+
+ ///
+ /// Check if two GetPathsOptions instances are not equal.
+ ///
+ /// The first instance to compare.
+ /// The second instance to compare.
+ /// True if they're not equal, false otherwise.
+ public static bool operator !=(GetPathsOptions left, GetPathsOptions right) =>
+ !(left == right);
+
+ ///
+ /// Check if two GetPathsOptions instances are equal.
+ ///
+ /// The instance to compare to.
+ /// True if they're equal, false otherwise.
+ public bool Equals(GetPathsOptions other) =>
+ Path == other.Path &&
+ Upn == other.Upn && Recursive == other.Recursive;
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/LeaseDurationType.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/LeaseDurationType.cs
new file mode 100644
index 000000000000..fd353888667d
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/LeaseDurationType.cs
@@ -0,0 +1,20 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// LeaseDurationType values
+ ///
+ public enum LeaseDurationType
+ {
+ ///
+ /// infinite
+ ///
+ Infinite,
+
+ ///
+ /// fixed
+ ///
+ Fixed
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/LeaseState.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/LeaseState.cs
new file mode 100644
index 000000000000..29f8424f008f
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/LeaseState.cs
@@ -0,0 +1,36 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// LeaseState values
+ ///
+ public enum LeaseState
+ {
+ ///
+ /// available
+ ///
+ Available,
+
+ ///
+ /// leased
+ ///
+ Leased,
+
+ ///
+ /// expired
+ ///
+ Expired,
+
+ ///
+ /// breaking
+ ///
+ Breaking,
+
+ ///
+ /// broken
+ ///
+ Broken
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/LeaseStatus.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/LeaseStatus.cs
new file mode 100644
index 000000000000..091705d57dc2
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/LeaseStatus.cs
@@ -0,0 +1,23 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// LeaseStatus values
+ ///
+ #pragma warning disable CA1717 // Only FlagsAttribute enums should have plural names
+ public enum LeaseStatus
+ #pragma warning restore CA1717 // Only FlagsAttribute enums should have plural names
+ {
+ ///
+ /// locked
+ ///
+ Locked,
+
+ ///
+ /// unlocked
+ ///
+ Unlocked
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathAccessControl.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathAccessControl.cs
new file mode 100644
index 000000000000..43b526972d40
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathAccessControl.cs
@@ -0,0 +1,41 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// The Access Control for a Path.
+ ///
+ public class PathAccessControl
+ {
+ ///
+ /// The owner of the file or directory. Included in the response if Hierarchical Namespace is enabled for the account.
+ ///
+ public string Owner { get; internal set; }
+
+ ///
+ /// The owning group of the file or directory. Included in the response if Hierarchical Namespace is enabled for the account.
+ ///
+ public string Group { get; internal set; }
+
+ ///
+ /// The POSIX access permissions for the file owner, the file owning group, and others. Included in the response if Hierarchical Namespace is enabled for the account.
+ ///
+ public string Permissions { get; internal set; }
+
+ ///
+ /// The POSIX access control list for the file or directory. Included in the response only if Hierarchical Namespace is enabled for the account.
+ ///
+ public string Acl { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of PathAccessControl instances.
+ /// You can use DataLakeModelFactory.PathAccessControl instead.
+ ///
+ internal PathAccessControl() { }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathContentInfo.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathContentInfo.cs
new file mode 100644
index 000000000000..24694d826cba
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathContentInfo.cs
@@ -0,0 +1,85 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// PathContentInfo
+ ///
+ public class PathContentInfo
+ {
+ ///
+ /// A hash of the request content. This header is only returned for the "Flush" operation.
+ /// This header is returned so that the client can check for message content integrity.
+ /// This header refers to the content of the request, not actual file content.
+ ///
+ public string ContentHash { get; internal set; }
+
+ ///
+ /// An HTTP entity tag associated with the file or directory.
+ ///
+ public ETag ETag { get; internal set; }
+
+ ///
+ /// The date and time the file or directory was last modified.
+ /// Write operations on the file or directory update the last modified time.
+ ///
+ public DateTimeOffset LastModified { get; internal set; }
+
+ ///
+ /// Indicates that the service supports requests for partial file content.
+ ///
+ public string AcceptRanges { get; internal set; }
+
+ ///
+ /// If the Cache-Control request header has previously been set for the resource, that value is returned in this header.
+ ///
+ public string CacheControl { get; internal set; }
+
+ ///
+ /// If the Content-Disposition request header has previously been set for the resource, that value is returned in this header.
+ ///
+ public string ContentDisposition { get; internal set; }
+
+ ///
+ /// If the Content-Encoding request header has previously been set for the resource, that value is returned in this header.
+ ///
+ public string ContentEncoding { get; internal set; }
+
+ ///
+ /// If the Content-Language request header has previously been set for the resource, that value is returned in this header.
+ ///
+ public string ContentLanguage { get; internal set; }
+
+ ///
+ /// The size of the resource in bytes.
+ ///
+ public long ContentLength { get; internal set; }
+
+ ///
+ /// Indicates the range of bytes returned in the event that the client requested a subset of the file by
+ /// setting the Range request header.
+ ///
+ public string ContentRange { get; internal set; }
+
+ ///
+ /// The content type specified for the resource. If no content type was specified, the default content
+ /// type is application/octet-stream.
+ ///
+ public string ContentType { get; internal set; }
+
+ ///
+ /// Metadata for the path
+ ///
+ public IDictionary Metadata { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of PathContentInfo instances.
+ /// You can use DataLakeModelFactory.PathContentInfo instead.
+ ///
+ internal PathContentInfo() { }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathCreateInfo.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathCreateInfo.cs
new file mode 100644
index 000000000000..c22475ef5066
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathCreateInfo.cs
@@ -0,0 +1,35 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// PathCreateInfo
+ ///
+ public class PathCreateInfo
+ {
+ ///
+ /// Path info for the file or directory.
+ ///
+ public PathInfo PathInfo { get; internal set; }
+
+ ///
+ /// When renaming a directory, the number of paths that are renamed with each invocation is limited.
+ /// If the number of paths to be renamed exceeds this limit, a continuation token is returned in this response header.
+ /// When a continuation token is returned in the response, it must be specified in a subsequent invocation of the rename
+ /// operation to continue renaming the directory.
+ ///
+ public string Continuation { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of PathCreateInfo instances.
+ /// You can use DataLakeModelFactory.PathCreateInfo instead.
+ ///
+ internal PathCreateInfo() { }
+
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathHttpHeaders.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathHttpHeaders.cs
new file mode 100644
index 000000000000..de60dd1f9750
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathHttpHeaders.cs
@@ -0,0 +1,113 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Standard HTTP properties supported by paths.
+ /// These properties are represented as standard HTTP headers use standard
+ /// names, as specified in the Header Field Definitions section 14 of the
+ /// HTTP/1.1 protocol specification.
+ ///
+ /// For more information, see .
+ ///
+ public struct PathHttpHeaders : IEquatable
+ {
+ ///
+ /// The MIME content type of the path.
+ ///
+ public string ContentType { get; set; }
+
+#pragma warning disable CA1819 // Properties should not return arrays
+ ///
+ /// An MD5 hash of the path content. This hash is used to verify the
+ /// integrity of the path during transport. When this header is
+ /// specified, the storage service checks the hash that has arrived
+ /// with the one that was sent. If the two hashes do not match, the
+ /// operation will fail with error code 400 (Bad Request).
+ ///
+ public byte[] ContentHash { get; set; }
+
+ ///
+ /// Specifies which content encodings have been applied to the path.
+ /// This value is returned to the client when the Get path operation
+ /// is performed on the path resource. The client can use this value
+ /// when returned to decode the path content.
+ ///
+ public string ContentEncoding { get; set; }
+
+ ///
+ /// Specifies the natural language used by this resource.
+ ///
+ public string ContentLanguage { get; set; }
+#pragma warning restore CA1819 // Properties should not return arrays
+
+ ///
+ /// Conveys additional information about how to process the response
+ /// payload, and also can be used to attach additional metadata. For
+ /// example, if set to attachment, it indicates that the user-agent
+ /// should not display the response, but instead show a Save As dialog
+ /// with a filename other than the path name specified.
+ ///
+ public string ContentDisposition { get; set; }
+
+ ///
+ /// Specify directives for caching mechanisms.
+ ///
+ public string CacheControl { get; set; }
+
+ ///
+ /// Check if two PathHttpHeaders instances are equal.
+ ///
+ /// The instance to compare to.
+ /// True if they're equal, false otherwise.
+ public override bool Equals(object obj)
+ => obj is PathHttpHeaders other && Equals(other);
+
+ ///
+ /// Get a hash code for the PathHttpHeaders.
+ /// All properties of this struct default to null, so each term is
+ /// null-coalesced to avoid a NullReferenceException on partially
+ /// initialized (or default) instances.
+ /// Hash code for the PathHttpHeaders.
+ public override int GetHashCode()
+ => (CacheControl?.GetHashCode() ?? 0)
+ ^ (ContentDisposition?.GetHashCode() ?? 0)
+ ^ (ContentEncoding?.GetHashCode() ?? 0)
+ ^ (ContentLanguage?.GetHashCode() ?? 0)
+ ^ (ContentHash?.GetHashCode() ?? 0) ^ (ContentType?.GetHashCode() ?? 0);
+
+ ///
+ /// Check if two PathHttpHeaders instances are equal.
+ ///
+ /// The first instance to compare.
+ /// The second instance to compare.
+ /// True if they're equal, false otherwise.
+ public static bool operator ==(PathHttpHeaders left, PathHttpHeaders right) => left.Equals(right);
+
+ ///
+ /// Check if two PathHttpHeaders instances are not equal.
+ ///
+ /// The first instance to compare.
+ /// The second instance to compare.
+ /// True if they're not equal, false otherwise.
+ public static bool operator !=(PathHttpHeaders left, PathHttpHeaders right) => !(left == right);
+
+ ///
+ /// Check if two PathHttpHeaders instances are equal.
+ ///
+ /// The instance to compare to.
+ public bool Equals(PathHttpHeaders other)
+ => CacheControl == other.CacheControl
+ && ContentDisposition == other.ContentDisposition
+ && ContentEncoding == other.ContentEncoding
+ && ContentLanguage == other.ContentLanguage
+ && ContentHash == other.ContentHash
+ && ContentType == other.ContentType
+ ;
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathInfo.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathInfo.cs
new file mode 100644
index 000000000000..bb3f78927559
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathInfo.cs
@@ -0,0 +1,29 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// PathInfo
+ ///
+ public class PathInfo
+ {
+ ///
+ /// The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer, the ETag value will be in quotes.
+ ///
+ public ETag ETag { get; internal set; }
+
+ ///
+ /// Returns the date and time the path was last modified. Any operation that modifies the path, including an update of the path's metadata or properties, changes the last-modified time of the path.
+ ///
+ public DateTimeOffset LastModified { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of PathInfo instances.
+ /// You can use DataLakeModelFactory.PathInfo instead.
+ ///
+ internal PathInfo() { }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathItem.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathItem.cs
new file mode 100644
index 000000000000..c364572ece9a
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathItem.cs
@@ -0,0 +1,59 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Path
+ ///
+ public class PathItem
+ {
+ ///
+ /// name
+ ///
+ public string Name { get; internal set; }
+
+ ///
+ /// isDirectory
+ ///
+ public bool? IsDirectory { get; internal set; }
+
+ ///
+ /// lastModified
+ ///
+ public DateTimeOffset LastModified { get; internal set; }
+
+ ///
+ /// eTag
+ ///
+ public ETag ETag { get; internal set; }
+
+ ///
+ /// contentLength
+ ///
+ public long? ContentLength { get; internal set; }
+
+ ///
+ /// owner
+ ///
+ public string Owner { get; internal set; }
+
+ ///
+ /// group
+ ///
+ public string Group { get; internal set; }
+
+ ///
+ /// permissions
+ ///
+ public string Permissions { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of PathItem instances.
+ /// You can use DataLakeModelFactory.PathItem instead.
+ ///
+ internal PathItem() { }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathProperties.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathProperties.cs
new file mode 100644
index 000000000000..cbe9c0d355a8
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathProperties.cs
@@ -0,0 +1,180 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// PathProperties
+ ///
+ public class PathProperties
+ {
+ ///
+ /// Returns the date and time the path was last modified. Any operation that modifies the path,
+ /// including an update of the path's metadata or properties, changes the last-modified time of the path.
+ ///
+ public DateTimeOffset LastModified { get; internal set; }
+
+ ///
+ /// Returns the date and time the path was created.
+ ///
+ public DateTimeOffset CreatedOn { get; internal set; }
+
+ ///
+ /// The path's metadata.
+ ///
+ public IDictionary Metadata { get; internal set; }
+
+ ///
+ /// Conclusion time of the last attempted Copy Blob operation where this path was the destination path.
+ /// This value can specify the time of a completed, aborted, or failed copy attempt. This header does not appear
+ /// if a copy is pending, if this path has never been the destination in a Copy path operation, or if this path
+ /// has been modified after a concluded Copy Blob operation using Set path Properties, Put path, or Put Block List.
+ ///
+ public DateTimeOffset CopyCompletedOn { get; internal set; }
+
+ ///
+ /// Only appears when x-ms-copy-status is failed or pending. Describes the cause of the last fatal or non-fatal copy
+ /// operation failure. This header does not appear if this path has never been the destination in a Copy Blob operation,
+ /// or if this path has been modified after a concluded Copy Blob operation using Set Path Properties, Put Blob, or Put Block List
+ ///
+ public string CopyStatusDescription { get; internal set; }
+
+ ///
+ /// String identifier for this copy operation. Use with Get Path Properties to check the status of this copy operation,
+ /// or pass to Abort Copy Blob to abort a pending copy.
+ ///
+ public string CopyId { get; internal set; }
+
+ ///
+ /// Contains the number of bytes copied and the total bytes in the source in the last attempted Copy Blob operation where this
+ /// path was the destination path. Can show between 0 and Content-Length bytes copied. This header does not appear if this
+ /// path has never been the destination in a Copy Blob operation, or if this path has been modified after a concluded Copy
+ /// Blob operation using Set Blob Properties, Put Blob, or Put Block List
+ ///
+ public string CopyProgress { get; internal set; }
+
+ ///
+ /// URL up to 2 KB in length that specifies the source path or file used in the last attempted Copy Blob operation where
+ /// this path was the destination path. This header does not appear if this path has never been the destination in a Copy
+ /// Blob operation, or if this path has been modified after a concluded Copy Blob operation using Set Path Properties, Put
+ /// Blob, or Put Block List.
+ ///
+ public Uri CopySource { get; internal set; }
+
+ ///
+ /// State of the copy operation identified by x-ms-copy-id.
+ ///
+ public CopyStatus CopyStatus { get; internal set; }
+
+ ///
+ /// Included if the path is incremental copy blob.
+ ///
+ public bool IsIncrementalCopy { get; internal set; }
+
+ ///
+ /// When a path is leased, specifies whether the lease is of infinite or fixed duration.
+ ///
+ public LeaseDurationType LeaseDuration { get; internal set; }
+
+ ///
+ /// Lease state of the path.
+ ///
+ public LeaseState LeaseState { get; internal set; }
+
+ ///
+ /// The current lease status of the path.
+ ///
+ public LeaseStatus LeaseStatus { get; internal set; }
+
+ ///
+ /// The number of bytes present in the response body.
+ ///
+ public long ContentLength { get; internal set; }
+
+ ///
+ /// The content type specified for the path. The default content type is 'application/octet-stream'
+ ///
+ public string ContentType { get; internal set; }
+
+ ///
+ /// The ETag contains a value that you can use to perform operations conditionally. If the request version is 2011-08-18 or newer,
+ /// the ETag value will be in quotes.
+ ///
+ public ETag ETag { get; internal set; }
+
+ ///
+ /// If the path has an MD5 hash and this operation is to read the full path, this response header is returned so that the client can
+ /// check for message content integrity.
+ ///
+#pragma warning disable CA1819 // Properties should not return arrays
+ public byte[] ContentHash { get; internal set; }
+#pragma warning restore CA1819 // Properties should not return arrays
+
+ ///
+ /// This header returns the value that was specified for the Content-Encoding request header
+ ///
+ public IEnumerable ContentEncoding { get; internal set; }
+
+ ///
+ /// This header returns the value that was specified for the 'x-ms-blob-content-disposition' header.
+ /// The Content-Disposition response header field conveys additional information about how to process the response payload,
+ /// and also can be used to attach additional metadata. For example, if set to attachment, it indicates that the user-agent
+ /// should not display the response, but instead show a Save As dialog with a filename other than the path name specified.
+ ///
+ public string ContentDisposition { get; internal set; }
+
+ ///
+ /// This header returns the value that was specified for the Content-Language request header.
+ ///
+ public IEnumerable ContentLanguage { get; internal set; }
+
+ ///
+ /// This header is returned if it was previously specified for the path.
+ ///
+ public string CacheControl { get; internal set; }
+
+ ///
+ /// Indicates that the service supports requests for partial path content.
+ ///
+ public string AcceptRanges { get; internal set; }
+
+ ///
+ /// The value of this header is set to true if the path data and application metadata are completely encrypted using the
+ /// specified algorithm. Otherwise, the value is set to false (when the path is unencrypted, or if only parts of the path/application
+ /// metadata are encrypted).
+ ///
+ public bool IsServerEncrypted { get; internal set; }
+
+ ///
+ /// The SHA-256 hash of the encryption key used to encrypt the metadata. This header is only returned when the metadata
+ /// was encrypted with a customer-provided key.
+ ///
+ public string EncryptionKeySha256 { get; internal set; }
+
+ ///
+ /// The tier of block blob on blob storage LRS accounts. For blob storage LRS accounts, valid values are Hot/Cool/Archive.
+ ///
+ public string AccessTier { get; internal set; }
+
+ ///
+ /// For blob storage LRS accounts, valid values are rehydrate-pending-to-hot/rehydrate-pending-to-cool.
+ /// If the blob is being rehydrated and is not complete then this header is returned indicating that rehydrate is pending
+ /// and also tells the destination tier.
+ ///
+ public string ArchiveStatus { get; internal set; }
+
+ ///
+ /// The time the tier was changed on the object. This is only returned if the tier on the block blob was ever set.
+ ///
+ public DateTimeOffset AccessTierChangedOn { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of PathProperties instances.
+ /// You can use DataLakeModelFactory.PathProperties instead.
+ ///
+ internal PathProperties() { }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathSegment.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathSegment.cs
new file mode 100644
index 000000000000..347ead7f622a
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PathSegment.cs
@@ -0,0 +1,31 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System.Collections.Generic;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// An enumeration of paths.
+ ///
+ public class PathSegment
+ {
+ ///
+ /// If the number of paths to be listed exceeds the maxResults limit, a continuation token is returned.
+ /// When a continuation token is returned in the response, it must be specified in a subsequent invocation
+ /// of the list operation to continue listing the paths.
+ ///
+ public string Continuation { get; internal set; }
+
+ ///
+ /// PathItems
+ ///
+ public IEnumerable Paths { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of PathSegment instances.
+ /// You can use DataLakeModelFactory.PathSegment instead.
+ ///
+ internal PathSegment() { }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PublicAccessType.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PublicAccessType.cs
new file mode 100644
index 000000000000..5afbf039de36
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/PublicAccessType.cs
@@ -0,0 +1,27 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+
+ ///
+ /// Specifies whether data in the container may be accessed publicly and the level of access
+ ///
+ public enum PublicAccessType
+ {
+ ///
+ /// none
+ ///
+ None,
+
+ ///
+ /// container
+ ///
+ Container,
+
+ ///
+ /// blob
+ ///
+ Blob
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/ReleasedObjectInfo.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/ReleasedObjectInfo.cs
new file mode 100644
index 000000000000..8e6b9d79d99b
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/ReleasedObjectInfo.cs
@@ -0,0 +1,116 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.ComponentModel;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// Provides the version state of a successfully released blob or container
+ /// object.
+ ///
+ public readonly struct ReleasedObjectInfo : IEquatable
+ {
+ ///
+ /// The ETag contains a value that you can use to perform operations
+ /// conditionally. If the request version is 2011-08-18 or newer, the
+ /// ETag value will be in quotes.
+ ///
+ public ETag ETag { get; }
+
+ ///
+ /// Returns the date and time the object was last modified. Any
+ /// operation that modifies the blob or container, including an update
+ /// of the object's metadata or properties, changes the last-modified
+ /// time of the object.
+ ///
+ public DateTimeOffset LastModified { get; }
+
+ ///
+ /// Creates a new .
+ ///
+ ///
+ /// The contains a value that you can use to perform
+ /// operations conditionally.
+ ///
+ ///
+ /// The date and time the object was last modified.
+ ///
+ public ReleasedObjectInfo(ETag eTag, DateTimeOffset lastModified)
+ {
+ ETag = eTag;
+ LastModified = lastModified;
+ }
+
+ ///
+ /// Creates a new .
+ ///
+ /// A released .
+ internal ReleasedObjectInfo(PathInfo info)
+ : this(info.ETag, info.LastModified)
+ {
+ }
+
+ ///
+ /// Creates a new .
+ ///
+ /// A released .
+ internal ReleasedObjectInfo(FileSystemInfo info)
+ : this(info.ETag, info.LastModified)
+ {
+ }
+
+ ///
+ /// Creates a string representation of a
+ /// .
+ ///
+ [EditorBrowsable(EditorBrowsableState.Never)]
+ public override string ToString() => base.ToString();
+
+ ///
+ /// Check if two instances are equal.
+ ///
+ /// The instance to compare to.
+ /// True if they're equal, false otherwise.
+ [EditorBrowsable(EditorBrowsableState.Never)]
+ public override bool Equals(object obj) =>
+ obj is ReleasedObjectInfo other && Equals(other);
+
+ ///
+ /// Check if two instances are equal.
+ ///
+ /// The instance to compare to.
+ /// True if they're equal, false otherwise.
+ public bool Equals(ReleasedObjectInfo other) =>
+ ETag == other.ETag &&
+ LastModified == other.LastModified;
+
+ ///
+ /// Get a hash code for the .
+ ///
+ /// Hash code for the .
+ [EditorBrowsable(EditorBrowsableState.Never)]
+ public override int GetHashCode() =>
+ ETag.GetHashCode() ^
+ LastModified.GetHashCode();
+
+ ///
+ /// Check if two instances are equal.
+ ///
+ /// The first instance to compare.
+ /// The second instance to compare.
+ /// True if they're equal, false otherwise.
+ public static bool operator ==(ReleasedObjectInfo left, ReleasedObjectInfo right) =>
+ left.Equals(right);
+
+ ///
+ /// Check if two instances are not equal.
+ ///
+ /// The first instance to compare.
+ /// The second instance to compare.
+ /// True if they're not equal, false otherwise.
+ public static bool operator !=(ReleasedObjectInfo left, ReleasedObjectInfo right) =>
+ !(left == right);
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Models/UserDelegationKey.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/UserDelegationKey.cs
new file mode 100644
index 000000000000..a3f761e2362e
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Models/UserDelegationKey.cs
@@ -0,0 +1,67 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Azure.Storage.Files.DataLake.Models
+{
+ ///
+ /// A user delegation key
+ ///
+ public class UserDelegationKey
+ {
+ ///
+ /// The Azure Active Directory object ID in GUID format.
+ ///
+ public string SignedObjectId { get; internal set; }
+
+ ///
+ /// The Azure Active Directory tenant ID in GUID format
+ ///
+ public string SignedTenantId { get; internal set; }
+
+ ///
+ /// The date-time the key is active
+ ///
+ public DateTimeOffset SignedStartsOn { get; internal set; }
+
+ ///
+ /// The date-time the key expires
+ ///
+ public DateTimeOffset SignedExpiresOn { get; internal set; }
+
+ ///
+ /// Abbreviation of the Azure Storage service that accepts the key
+ ///
+ public string SignedService { get; internal set; }
+
+ ///
+ /// The service version that created the key
+ ///
+ public string SignedVersion { get; internal set; }
+
+ ///
+ /// The key as a base64 string
+ ///
+ public string Value { get; internal set; }
+
+ ///
+ /// Prevent direct instantiation of UserDelegationKey instances.
+ /// You can use DataLakeModelFactory.UserDelegationKey instead.
+ ///
+ internal UserDelegationKey() { }
+
+ internal UserDelegationKey(Blobs.Models.UserDelegationKey blobUserDelegationKey)
+ {
+ SignedObjectId = blobUserDelegationKey.SignedObjectId;
+ SignedTenantId = blobUserDelegationKey.SignedTenantId;
+ SignedStartsOn = blobUserDelegationKey.SignedStartsOn;
+ SignedExpiresOn = blobUserDelegationKey.SignedExpiresOn;
+ SignedService = blobUserDelegationKey.SignedService;
+ SignedVersion = blobUserDelegationKey.SignedVersion;
+ Value = blobUserDelegationKey.Value;
+ }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/PathClient.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/PathClient.cs
new file mode 100644
index 000000000000..b0d68f5b02a3
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/PathClient.cs
@@ -0,0 +1,1462 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+using Azure.Core;
+using Azure.Core.Pipeline;
+using Azure.Storage.Blobs.Specialized;
+using Azure.Storage.Files.DataLake.Models;
+using Metadata = System.Collections.Generic.IDictionary;
+
+namespace Azure.Storage.Files.DataLake
+{
+ ///
+ /// A PathClient represents a URI to the Azure DataLake service allowing you to manipulate a file or directory.
+ ///
+ public class PathClient
+ {
+ ///
+ /// A associated with the path;
+ ///
+ internal readonly BlockBlobClient _blockBlobClient;
+
+ ///
+ /// BlobClient
+ ///
+ internal virtual BlockBlobClient BlobClient => _blockBlobClient;
+
+ ///
+ /// The path's primary endpoint.
+ ///
+ private readonly Uri _uri;
+
+ ///
+ /// The path's blob endpoint.
+ ///
+ private readonly Uri _blobUri;
+
+ ///
+ /// The path's dfs endpoint.
+ ///
+ private readonly Uri _dfsUri;
+
+ ///
+ /// DFS Uri
+ ///
+ internal Uri DfsUri => _dfsUri;
+
+ ///
+ /// Gets the directory's primary endpoint.
+ ///
+ public virtual Uri Uri => _uri;
+
+ ///
+ /// The transport pipeline used to send
+ /// every request.
+ ///
+ private readonly HttpPipeline _pipeline;
+
+ ///
+ /// Gets the transport pipeline used to send
+ /// every request.
+ ///
+ internal virtual HttpPipeline Pipeline => _pipeline;
+
+ ///
+ /// The instance used to create diagnostic scopes
+ /// every request.
+ ///
+ private readonly ClientDiagnostics _clientDiagnostics;
+
+ ///
+ /// The instance used to create diagnostic scopes
+ /// every request.
+ ///
+ internal virtual ClientDiagnostics ClientDiagnostics => _clientDiagnostics;
+
+ ///
+ /// The Storage account name corresponding to the directory client.
+ ///
+ private string _accountName;
+
+ ///
+ /// Gets the Storage account name corresponding to the directory client.
+ ///
+ public virtual string AccountName
+ {
+ get
+ {
+ SetNameFieldsIfNull();
+ return _accountName;
+ }
+ }
+
+ ///
+ /// The file system name corresponding to the directory client.
+ ///
+ private string _fileSystemName;
+
+ ///
+ /// Gets the file system name corresponding to the directory client.
+ ///
+ public virtual string FileSystemName
+ {
+ get
+ {
+ SetNameFieldsIfNull();
+ return _fileSystemName;
+ }
+ }
+
+ #region ctors
+ ///
+ /// Initializes a new instance of the
+ /// class for mocking.
+ ///
+ protected PathClient()
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the resource that includes the
+ /// name of the account, the name of the file system, and the path to the
+ /// resource.
+ ///
+ ///
+ /// Optional that define the transport
+ /// pipeline policies for authentication, retries, etc., that are
+ /// applied to every request.
+ ///
+ public PathClient(Uri pathUri, DataLakeClientOptions options = default)
+ : this(pathUri, (HttpPipelinePolicy)null, options)
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the resource that includes the
+ /// name of the account, the name of the file system, and the path to the
+ /// resource.
+ ///
+ ///
+ /// The shared key credential used to sign requests.
+ ///
+ ///
+ /// Optional client options that define the transport pipeline
+ /// policies for authentication, retries, etc., that are applied to
+ /// every request.
+ ///
+ public PathClient(Uri pathUri, StorageSharedKeyCredential credential, DataLakeClientOptions options = default)
+ : this(pathUri, credential.AsPolicy(), options)
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the path that includes the
+ /// name of the account, the name of the file system, and the path to
+ /// the resource.
+ ///
+ ///
+ /// An optional authentication policy used to sign requests.
+ ///
+ ///
+ /// Optional client options that define the transport pipeline
+ /// policies for authentication, retries, etc., that are applied to
+ /// every request.
+ ///
+ internal PathClient(Uri pathUri, HttpPipelinePolicy authentication, DataLakeClientOptions options)
+ {
+ _uri = pathUri;
+ _blobUri = GetBlobUri(pathUri);
+ _dfsUri = GetDfsUri(pathUri);
+ _pipeline = (options ?? new DataLakeClientOptions()).Build(authentication);
+ _clientDiagnostics = new ClientDiagnostics(options);
+ _blockBlobClient = new BlockBlobClient(_blobUri, _pipeline, _clientDiagnostics, null);
+
+ }
+
+ ///
+ /// Initializes a new instance of the
+ /// class.
+ ///
+ ///
+ /// A referencing the directory that includes the
+ /// name of the account, the name of the file system, and the path to the
+ /// resource.
+ ///
+ ///
+ /// The transport pipeline used to send every request.
+ ///
+ ///
+ /// Optional client options that define the transport pipeline
+ /// policies for authentication, retries, etc., that are applied to
+ ///
+ internal PathClient(Uri pathUri, HttpPipeline pipeline, DataLakeClientOptions options = default)
+ {
+ _uri = pathUri;
+ _blobUri = GetBlobUri(pathUri);
+ _dfsUri = GetDfsUri(pathUri);
+ _pipeline = pipeline;
+ _clientDiagnostics = new ClientDiagnostics(options ?? new DataLakeClientOptions());
+ _blockBlobClient = new BlockBlobClient(_blobUri, pipeline, _clientDiagnostics, null);
+ }
+ #endregion
+
+ ///
+ /// Gets the blob Uri.
+ ///
+ private static Uri GetBlobUri(Uri uri)
+ {
+ Uri blobUri;
+ if (uri.Host.Contains(Constants.DataLake.DfsUriSuffix))
+ {
+ UriBuilder uriBuilder = new UriBuilder(uri);
+ uriBuilder.Host = uriBuilder.Host.Replace(
+ Constants.DataLake.DfsUriSuffix,
+ Constants.DataLake.BlobUriSuffix);
+ blobUri = uriBuilder.Uri;
+ }
+ else
+ {
+ blobUri = uri;
+ }
+ return blobUri;
+ }
+
+ ///
+ /// Gets the dfs Uri.
+ ///
+ private static Uri GetDfsUri(Uri uri)
+ {
+ Uri dfsUri;
+ if (uri.Host.Contains(Constants.DataLake.BlobUriSuffix))
+ {
+ UriBuilder uriBuilder = new UriBuilder(uri);
+ uriBuilder.Host = uriBuilder.Host.Replace(
+ Constants.DataLake.BlobUriSuffix,
+ Constants.DataLake.DfsUriSuffix);
+ dfsUri = uriBuilder.Uri;
+ }
+ else
+ {
+ dfsUri = uri;
+ }
+ return dfsUri;
+ }
+
+ ///
+ /// Converts metadata into a DFS metadata string.
+ ///
+ protected static string BuildMetadataString(Metadata metadata)
+ {
+ if (metadata == null)
+ {
+ return null;
+ }
+ StringBuilder sb = new StringBuilder();
+ foreach (KeyValuePair kv in metadata)
+ {
+ sb.Append(kv.Key);
+ sb.Append("=");
+ byte[] valueBytes = Encoding.UTF8.GetBytes(kv.Value);
+ sb.Append(Convert.ToBase64String(valueBytes));
+ sb.Append(",");
+ }
+ sb.Remove(sb.Length - 1, 1);
+ return sb.ToString();
+ }
+
+ ///
+ /// Sets the various name fields if they are currently null.
+ ///
+ protected virtual void SetNameFieldsIfNull()
+ {
+ if (_fileSystemName == null || _accountName == null)
+ {
+ var builder = new DataLakeUriBuilder(Uri);
+ _fileSystemName = builder.FileSystemName;
+ _accountName = builder.AccountName;
+ }
+ }
+
+ #region Create
+ ///
+ /// The operation creates a file or directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// Resource type of this path - file or directory.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the
+ /// new file or directory.
+ ///
+ ///
+ /// Optional custom metadata to set for this file or directory.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access
+ /// permissions for the file owner, the file owning group, and others. Each class may be granted read,
+ /// write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit
+ /// octal notation (e.g. 0766) are supported.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account.
+ /// When creating a file or directory and the parent folder does not have a default ACL,
+ /// the umask restricts the permissions of the file or directory to be created. The resulting
+ /// permission is given by p bitwise-and ^u, where p is the permission and u is the umask. For example,
+ /// if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is
+ /// 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ /// in 4-digit octal notation (e.g. 0766).
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// newly created file or directory.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ protected virtual Response Create(
+ PathResourceType resourceType,
+ PathHttpHeaders? httpHeaders = default,
+ Metadata metadata = default,
+ string permissions = default,
+ string umask = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ CreateInternal(
+ resourceType,
+ httpHeaders,
+ metadata,
+ permissions,
+ umask,
+ conditions,
+ false, // async
+ cancellationToken)
+ .EnsureCompleted();
+
+ ///
+ /// The operation creates a file or directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// Resource type of this path - file or directory.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the
+ /// new file or directory.
+ ///
+ ///
+ /// Optional custom metadata to set for this file or directory.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access
+ /// permissions for the file owner, the file owning group, and others. Each class may be granted read,
+ /// write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit
+ /// octal notation (e.g. 0766) are supported.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account.
+ /// When creating a file or directory and the parent folder does not have a default ACL,
+ /// the umask restricts the permissions of the file or directory to be created. The resulting
+ /// permission is given by p bitwise-and ^u, where p is the permission and u is the umask. For example,
+ /// if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is
+ /// 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ /// in 4-digit octal notation (e.g. 0766).
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// newly created file or directory.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> CreateAsync(
+ PathResourceType resourceType,
+ PathHttpHeaders? httpHeaders = default,
+ Metadata metadata = default,
+ string permissions = default,
+ string umask = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ await CreateInternal(
+ resourceType,
+ httpHeaders,
+ metadata,
+ permissions,
+ umask,
+ conditions,
+ true, // async
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ ///
+ /// The operation creates a file or directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// Resource type of this path - file or directory.
+ ///
+ ///
+ /// Optional standard HTTP header properties that can be set for the
+ /// new file or directory.
+ ///
+ ///
+ /// Optional custom metadata to set for this file or directory.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access
+ /// permissions for the file owner, the file owning group, and others. Each class may be granted read,
+ /// write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit
+ /// octal notation (e.g. 0766) are supported.
+ ///
+ ///
+ /// Optional and only valid if Hierarchical Namespace is enabled for the account.
+ /// When creating a file or directory and the parent folder does not have a default ACL,
+ /// the umask restricts the permissions of the file or directory to be created. The resulting
+ /// permission is given by p bitwise-and ^u, where p is the permission and u is the umask. For example,
+ /// if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is
+ /// 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ /// in 4-digit octal notation (e.g. 0766).
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Whether to invoke the operation asynchronously.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// newly created file or directory.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ private async Task> CreateInternal(
+ PathResourceType resourceType,
+ PathHttpHeaders? httpHeaders,
+ Metadata metadata,
+ string permissions,
+ string umask,
+ DataLakeRequestConditions conditions,
+ bool async,
+ CancellationToken cancellationToken)
+ {
+ using (Pipeline.BeginLoggingScope(nameof(PathClient)))
+ {
+ Pipeline.LogMethodEnter(
+ nameof(PathClient),
+ message:
+ $"{nameof(Uri)}: {Uri}\n" +
+ $"{nameof(httpHeaders)}: {httpHeaders}\n" +
+ $"{nameof(metadata)}: {metadata}\n" +
+ $"{nameof(permissions)}: {permissions}\n" +
+ $"{nameof(umask)}: {umask}\n");
+ try
+ {
+
+ Response createResponse = await DataLakeRestClient.Path.CreateAsync(
+ clientDiagnostics: _clientDiagnostics,
+ pipeline: Pipeline,
+ resourceUri: _dfsUri,
+ resource: resourceType,
+ cacheControl: httpHeaders?.CacheControl,
+ contentEncoding: httpHeaders?.ContentEncoding,
+ contentDisposition: httpHeaders?.ContentDisposition,
+ contentType: httpHeaders?.ContentType,
+ contentLanguage: httpHeaders?.ContentLanguage,
+ leaseId: conditions?.LeaseId,
+ properties: BuildMetadataString(metadata),
+ permissions: permissions,
+ umask: umask,
+ ifMatch: conditions?.IfMatch,
+ ifNoneMatch: conditions?.IfNoneMatch,
+ ifModifiedSince: conditions?.IfModifiedSince,
+ ifUnmodifiedSince: conditions?.IfUnmodifiedSince,
+ async: async,
+ cancellationToken: cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ new PathInfo()
+ {
+ ETag = createResponse.Value.ETag,
+ LastModified = createResponse.Value.LastModified
+ },
+ createResponse.GetRawResponse());
+ }
+ catch (Exception ex)
+ {
+ Pipeline.LogException(ex);
+ throw;
+ }
+ finally
+ {
+ Pipeline.LogMethodExit(nameof(PathClient));
+ }
+ }
+ }
+ #endregion Create
+
+ #region Delete
+ ///
+ /// The operation marks the specified path for
+ /// deletion. The path is later deleted during
+ /// garbage collection.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Required and valid only when the resource is a directory. If "true", all paths beneath the directory will be deleted.
+ /// If "false" and the directory is non-empty, an error occurs.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// deleting this path.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A on successfully deleting.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ public virtual Response Delete(
+ bool? recursive = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ DeleteInternal(
+ recursive,
+ conditions,
+ false, // async
+ cancellationToken)
+ .EnsureCompleted();
+
+ ///
+ /// The operation marks the specified path for
+ /// deletion. The path is later deleted during
+ /// garbage collection.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Required and valid only when the resource is a directory. If "true", all paths beneath the directory will be deleted.
+ /// If "false" and the directory is non-empty, an error occurs.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// deleting this path.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A on successfully deleting.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ public virtual async Task DeleteAsync(
+ bool? recursive = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ await DeleteInternal(
+ recursive,
+ conditions,
+ true, // async
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ ///
+ /// The operation marks the specified path for
+ /// deletion. The path is later deleted during
+ /// garbage collection.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Required and valid only when the resource is a directory. If "true", all paths beneath the directory will be deleted.
+ /// If "false" and the directory is non-empty, an error occurs.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// deleting this path.
+ ///
+ ///
+ /// Whether to invoke the operation asynchronously.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A on successfully deleting.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ private async Task DeleteInternal(
+ bool? recursive,
+ DataLakeRequestConditions conditions,
+ bool async,
+ CancellationToken cancellationToken)
+ {
+ using (Pipeline.BeginLoggingScope(nameof(PathClient)))
+ {
+ Pipeline.LogMethodEnter(
+ nameof(BlobBaseClient),
+ message:
+ $"{nameof(Uri)}: {Uri}\n" +
+ $"{nameof(recursive)}: {recursive}\n" +
+ $"{nameof(conditions)}: {conditions}");
+ try
+ {
+ Response response = await DataLakeRestClient.Path.DeleteAsync(
+ clientDiagnostics: _clientDiagnostics,
+ pipeline: Pipeline,
+ resourceUri: _dfsUri,
+ recursive: recursive,
+ leaseId: conditions?.LeaseId,
+ ifMatch: conditions?.IfMatch,
+ ifNoneMatch: conditions?.IfNoneMatch,
+ ifModifiedSince: conditions?.IfModifiedSince,
+ ifUnmodifiedSince: conditions?.IfUnmodifiedSince,
+ async: async,
+ cancellationToken: cancellationToken)
+ .ConfigureAwait(false);
+
+ return response.GetRawResponse();
+ }
+ catch (Exception ex)
+ {
+ Pipeline.LogException(ex);
+ throw;
+ }
+ finally
+ {
+ Pipeline.LogMethodExit(nameof(PathClient));
+ }
+ }
+ }
+ #endregion Delete
+
+ #region Rename
+ ///
+ /// The operation renames a file or directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// The destination path to rename the path to.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the source on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// renamed file or directory.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response Rename(
+ string destinationPath,
+ DataLakeRequestConditions destConditions = default,
+ DataLakeRequestConditions sourceConditions = default,
+ CancellationToken cancellationToken = default) =>
+ RenameInternal(
+ destinationPath,
+ destConditions,
+ sourceConditions,
+ false, // async
+ cancellationToken)
+ .EnsureCompleted();
+
+ ///
+ /// The operation renames a file or directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// The destination path to rename the path to.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the source on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// renamed file or directory.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> RenameAsync(
+ string destinationPath,
+ DataLakeRequestConditions destConditions = default,
+ DataLakeRequestConditions sourceConditions = default,
+ CancellationToken cancellationToken = default) =>
+ await RenameInternal(
+ destinationPath,
+ destConditions,
+ sourceConditions,
+ true, // async
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ ///
+ /// The operation renames a file or directory.
+ ///
+ /// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/datalakestoragegen2/path/create.
+ ///
+ ///
+ /// The destination path to rename the path to.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the creation of this file or directory.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on the source on the creation of this file or directory.
+ ///
+ ///
+ /// Whether to invoke the operation asynchronously.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// renamed file or directory.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ private async Task> RenameInternal(
+ string destinationPath,
+ DataLakeRequestConditions destConditions,
+ DataLakeRequestConditions sourceConditions,
+ bool async,
+ CancellationToken cancellationToken)
+ {
+ using (Pipeline.BeginLoggingScope(nameof(PathClient)))
+ {
+ Pipeline.LogMethodEnter(
+ nameof(PathClient),
+ message:
+ $"{nameof(Uri)}: {Uri}\n" +
+ $"{nameof(destinationPath)}: {destinationPath}\n" +
+ $"{nameof(destConditions)}: {destConditions}\n" +
+ $"{nameof(sourceConditions)}: {sourceConditions}");
+ try
+ {
+ DataLakeUriBuilder uriBuilder = new DataLakeUriBuilder(_dfsUri);
+ string renameSource = "/" + uriBuilder.FileSystemName + "/" + uriBuilder.DirectoryOrFilePath;
+
+ uriBuilder.DirectoryOrFilePath = destinationPath;
+ PathClient destPathClient = new PathClient(uriBuilder.ToUri(), Pipeline);
+
+ Response response = await DataLakeRestClient.Path.CreateAsync(
+ clientDiagnostics: _clientDiagnostics,
+ pipeline: Pipeline,
+ resourceUri: destPathClient.DfsUri,
+ mode: PathRenameMode.Legacy,
+ renameSource: renameSource,
+ leaseId: destConditions?.LeaseId,
+ sourceLeaseId: sourceConditions?.LeaseId,
+ ifMatch: destConditions?.IfMatch,
+ ifNoneMatch: destConditions?.IfNoneMatch,
+ ifModifiedSince: destConditions?.IfModifiedSince,
+ ifUnmodifiedSince: destConditions?.IfUnmodifiedSince,
+ sourceIfMatch: sourceConditions?.IfMatch,
+ sourceIfNoneMatch: sourceConditions?.IfNoneMatch,
+ sourceIfModifiedSince: sourceConditions?.IfModifiedSince,
+ sourceIfUnmodifiedSince: sourceConditions?.IfUnmodifiedSince,
+ async: async,
+ cancellationToken: cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ destPathClient,
+ response.GetRawResponse());
+ }
+ catch (Exception ex)
+ {
+ Pipeline.LogException(ex);
+ throw;
+ }
+ finally
+ {
+ Pipeline.LogMethodExit(nameof(PathClient));
+ }
+ }
+ }
+ #endregion Rename
+
+ #region Get Access Control
+ ///
+ /// The operation returns the
+ /// access control data for a path.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional. Valid only when Hierarchical Namespace is enabled for the account. If "true",
+ /// the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+ /// headers will be transformed from Azure Active Directory Object IDs to User Principal Names.
+ /// If "false", the values will be returned as Azure Active Directory Object IDs. The default
+ /// value is false. Note that group and application Object IDs are not translated because they
+ /// do not have unique friendly names.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on getting the path's access control.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// path's access control.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response GetAccessControl(
+ bool? upn = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ GetAccessControlInternal(
+ upn,
+ conditions,
+ false, // async
+ cancellationToken)
+ .EnsureCompleted();
+
+ ///
+ /// The operation returns the
+ /// access control data for a path.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional. Valid only when Hierarchical Namespace is enabled for the account. If "true",
+ /// the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+ /// headers will be transformed from Azure Active Directory Object IDs to User Principal Names.
+ /// If "false", the values will be returned as Azure Active Directory Object IDs. The default
+ /// value is false. Note that group and application Object IDs are not translated because they
+ /// do not have unique friendly names.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on getting the path's access control.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// path's access control.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> GetAccessControlAsync(
+ bool? upn = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ await GetAccessControlInternal(
+ upn,
+ conditions,
+ true, // async
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ ///
+ /// The operation returns the
+ /// access control data for a path.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional. Valid only when Hierarchical Namespace is enabled for the account. If "true",
+ /// the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+ /// headers will be transformed from Azure Active Directory Object IDs to User Principal Names.
+ /// If "false", the values will be returned as Azure Active Directory Object IDs. The default
+ /// value is false. Note that group and application Object IDs are not translated because they
+ /// do not have unique friendly names.
+ ///
+ ///
+ /// Optional to add
+ /// conditions on getting the path's access control.
+ ///
+ ///
+ /// Whether to invoke the operation asynchronously.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// path's access control.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ private async Task> GetAccessControlInternal(
+ bool? upn,
+ DataLakeRequestConditions conditions,
+ bool async,
+ CancellationToken cancellationToken)
+ {
+ using (Pipeline.BeginLoggingScope(nameof(PathClient)))
+ {
+ Pipeline.LogMethodEnter(
+ nameof(PathClient),
+ message:
+ $"{nameof(Uri)}: {Uri}\n");
+ try
+ {
+ Response response = await DataLakeRestClient.Path.GetPropertiesAsync(
+ clientDiagnostics: _clientDiagnostics,
+ pipeline: Pipeline,
+ resourceUri: _dfsUri,
+ action: PathGetPropertiesAction.GetAccessControl,
+ upn: upn,
+ leaseId: conditions?.LeaseId,
+ ifMatch: conditions?.IfMatch,
+ ifNoneMatch: conditions?.IfNoneMatch,
+ ifModifiedSince: conditions?.IfModifiedSince,
+ ifUnmodifiedSince: conditions?.IfUnmodifiedSince,
+ async: async,
+ cancellationToken: cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ new PathAccessControl()
+ {
+ Owner = response.Value.Owner,
+ Group = response.Value.Group,
+ Permissions = response.Value.Permissions,
+ Acl = response.Value.ACL
+ },
+ response.GetRawResponse());
+ }
+ catch (Exception ex)
+ {
+ Pipeline.LogException(ex);
+ throw;
+ }
+ finally
+ {
+ Pipeline.LogMethodExit(nameof(PathClient));
+ }
+ }
+ }
+ #endregion Get Access Control
+
+ #region Set Access Control
+ ///
+ /// The operation sets the
+ /// Access Control on a path
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// The access control to set.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// setting the path's access control.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the updated
+ /// path.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response SetAccessControl(
+ PathAccessControl accessControl,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ SetAccessControlInternal(
+ accessControl,
+ conditions,
+ false, // async
+ cancellationToken)
+ .EnsureCompleted();
+
+ ///
+ /// The operation sets the
+ /// Access Control on a path
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// The access control to set.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// setting the path's access control.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the updated
+ /// path.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> SetAccessControlAsync(
+ PathAccessControl accessControl,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default) =>
+ await SetAccessControlInternal(
+ accessControl,
+ conditions,
+ true, // async
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ ///
+ /// The operation sets the
+ /// Access Control on a path
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// The access control to set.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// setting the path's access control.
+ ///
+ ///
+ /// Whether to invoke the operation asynchronously.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the updated
+ /// path.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ private async Task> SetAccessControlInternal(
+ PathAccessControl accessControl,
+ DataLakeRequestConditions conditions,
+ bool async,
+ CancellationToken cancellationToken)
+ {
+ using (Pipeline.BeginLoggingScope(nameof(PathClient)))
+ {
+ Pipeline.LogMethodEnter(
+ nameof(PathClient),
+ message:
+ $"{nameof(Uri)}: {Uri}\n" +
+ $"{nameof(accessControl)}: {accessControl}\n" +
+ $"{nameof(conditions)}: {conditions}");
+ try
+ {
+ Response response =
+ await DataLakeRestClient.Path.SetAccessControlAsync(
+ clientDiagnostics: _clientDiagnostics,
+ pipeline: Pipeline,
+ resourceUri: _dfsUri,
+ leaseId: conditions?.LeaseId,
+ owner: accessControl.Owner,
+ group: accessControl.Group,
+ permissions: accessControl.Permissions,
+ acl: accessControl.Acl,
+ ifMatch: conditions?.IfMatch,
+ ifNoneMatch: conditions?.IfNoneMatch,
+ ifModifiedSince: conditions?.IfModifiedSince,
+ ifUnmodifiedSince: conditions?.IfUnmodifiedSince,
+ async: async,
+ cancellationToken: cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ new PathInfo()
+ {
+ ETag = response.Value.ETag,
+ LastModified = response.Value.LastModified
+ },
+ response.GetRawResponse());
+ }
+ catch (Exception ex)
+ {
+ Pipeline.LogException(ex);
+ throw;
+ }
+ finally
+ {
+ Pipeline.LogMethodExit(nameof(PathClient));
+ }
+ }
+ }
+ #endregion Set Access Control
+
+ #region Get Properties
+ ///
+ /// The operation returns all
+ /// user-defined metadata, standard HTTP properties, and system
+ /// properties for the path. It does not return the content of the
+ /// path.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional to add
+ /// conditions on getting the path's properties.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// path's properties.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response GetProperties(
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = _blockBlobClient.GetProperties(
+ conditions,
+ cancellationToken);
+
+ return Response.FromValue(
+ response.Value.ToPathProperties(),
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation returns all
+ /// user-defined metadata, standard HTTP properties, and system
+ /// properties for the path. It does not return the content of the
+ /// path.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional to add
+ /// conditions on getting the path's properties.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the
+ /// path's properties.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> GetPropertiesAsync(
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = await _blockBlobClient.GetPropertiesAsync(
+ conditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ response.Value.ToPathProperties(),
+ response.GetRawResponse());
+ }
+ #endregion Get Properties
+
+ #region Set Http Headers
+ ///
+ /// The operation sets system
+ /// properties on the path.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional. The standard HTTP header system properties to set. If not specified, existing values will be cleared.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// setting the path's HTTP headers.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the updated
+ /// path.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response SetHttpHeaders(
+ PathHttpHeaders? httpHeaders = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = _blockBlobClient.SetHttpHeaders(
+ httpHeaders?.ToBlobHttpHeaders(),
+ conditions,
+ cancellationToken);
+
+ return Response.FromValue(
+ new PathInfo()
+ {
+ ETag = response.Value.ETag,
+ LastModified = response.Value.LastModified
+ },
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation sets system
+ /// properties on the path.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Optional. The standard HTTP header system properties to set. If not specified, existing values will be cleared.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// setting the path's HTTP headers.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the updated
+ /// path.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> SetHttpHeadersAsync(
+ PathHttpHeaders? httpHeaders = default,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ Response response = await _blockBlobClient.SetHttpHeadersAsync(
+ httpHeaders?.ToBlobHttpHeaders(),
+ conditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ new PathInfo()
+ {
+ ETag = response.Value.ETag,
+ LastModified = response.Value.LastModified
+ },
+ response.GetRawResponse());
+ }
+ #endregion Set Http Headers
+
+ #region Set Metadata
+ ///
+ /// The operation sets user-defined
+ /// metadata for the specified path as one or more name-value pairs.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Custom metadata to set for this path.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// setting the path's metadata.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the updated
+ /// path.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual Response SetMetadata(
+ Metadata metadata,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ // Forward the caller's cancellation token; it was previously accepted
+ // but never passed to the underlying blob client, so the operation
+ // could not be cancelled.
+ Response response = _blockBlobClient.SetMetadata(
+ metadata,
+ conditions,
+ cancellationToken);
+
+ return Response.FromValue(
+ new PathInfo()
+ {
+ ETag = response.Value.ETag,
+ LastModified = response.Value.LastModified
+ },
+ response.GetRawResponse());
+ }
+
+ ///
+ /// The operation sets user-defined
+ /// metadata for the specified path as one or more name-value pairs.
+ ///
+ /// For more information, see .
+ ///
+ ///
+ /// Custom metadata to set for this path.
+ ///
+ ///
+ /// Optional to add conditions on
+ /// setting the path's metadata.
+ ///
+ ///
+ /// Optional to propagate
+ /// notifications that the operation should be cancelled.
+ ///
+ ///
+ /// A describing the updated
+ /// path.
+ ///
+ ///
+ /// A will be thrown if
+ /// a failure occurs.
+ ///
+ [ForwardsClientCalls]
+ public virtual async Task> SetMetadataAsync(
+ Metadata metadata,
+ DataLakeRequestConditions conditions = default,
+ CancellationToken cancellationToken = default)
+ {
+ // Forward the caller's cancellation token; it was previously accepted
+ // but never passed to the underlying blob client, so the operation
+ // could not be cancelled.
+ Response response = await _blockBlobClient.SetMetadataAsync(
+ metadata,
+ conditions,
+ cancellationToken)
+ .ConfigureAwait(false);
+
+ return Response.FromValue(
+ new PathInfo()
+ {
+ ETag = response.Value.ETag,
+ LastModified = response.Value.LastModified
+ },
+ response.GetRawResponse());
+ }
+ #endregion Set Metadata
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/PathHttpHeadersExtensions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/PathHttpHeadersExtensions.cs
new file mode 100644
index 000000000000..8af66adff3de
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/PathHttpHeadersExtensions.cs
@@ -0,0 +1,22 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using Azure.Storage.Blobs.Models;
+using Azure.Storage.Files.DataLake.Models;
+
+namespace Azure.Storage.Files.DataLake
+{
+ internal static class PathHttpHeadersExtensions
+ {
+ // Converts Data Lake path headers to the blob-layer equivalent.
+ // ContentEncoding/ContentLanguage are single strings on PathHttpHeaders
+ // but collections on BlobHttpHeaders, so they are wrapped in arrays.
+ internal static BlobHttpHeaders ToBlobHttpHeaders(this PathHttpHeaders pathHttpHeaders) =>
+ new BlobHttpHeaders()
+ {
+ ContentType = pathHttpHeaders.ContentType,
+ ContentHash = pathHttpHeaders.ContentHash,
+ // Only wrap when a value is present; new[] { null } would send a
+ // single null entry instead of leaving the header unset.
+ ContentEncoding = pathHttpHeaders.ContentEncoding == null ? null : new string[] { pathHttpHeaders.ContentEncoding },
+ ContentLanguage = pathHttpHeaders.ContentLanguage == null ? null : new string[] { pathHttpHeaders.ContentLanguage },
+ ContentDisposition = pathHttpHeaders.ContentDisposition,
+ CacheControl = pathHttpHeaders.CacheControl
+ };
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/PathItemExtensions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/PathItemExtensions.cs
new file mode 100644
index 000000000000..d803c7d6888f
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/PathItemExtensions.cs
@@ -0,0 +1,68 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Globalization;
+using System.Text;
+using Azure.Storage.Files.DataLake.Models;
+
+namespace Azure.Storage.Files.DataLake
+{
+ ///
+ /// This is a temporary work-around until we get json support in the .NET code generator.
+ /// I was not able to get the JsonSerializer to deserialize a Dictionary of string, List of PathItem correctly,
+ /// the PathItem fields were always set to null.
+ ///
+ internal static class PathItemExtensions
+ {
+ // Builds a PathItem from the string-valued properties of one listing
+ // entry. Every key is optional: missing keys leave the corresponding
+ // field at its default (null / false / 0 / default timestamp / empty ETag).
+ internal static PathItem ToPathItem(this Dictionary dictionary)
+ {
+ dictionary.TryGetValue("name", out string name);
+ dictionary.TryGetValue("isDirectory", out string isDirectoryString);
+ dictionary.TryGetValue("lastModified", out string lastModifiedString);
+ dictionary.TryGetValue("etag", out string etagString);
+ dictionary.TryGetValue("contentLength", out string contentLengthString);
+ dictionary.TryGetValue("owner", out string owner);
+ dictionary.TryGetValue("group", out string group);
+ dictionary.TryGetValue("permissions", out string permissions);
+
+ // "isDirectory" is only present for directories; absent means file.
+ bool isDirectory = false;
+ if (isDirectoryString != null)
+ {
+ isDirectory = bool.Parse(isDirectoryString);
+ }
+
+ // NOTE(review): assumes the service timestamp is in a format
+ // DateTimeOffset.Parse accepts (e.g. RFC 1123) — confirm against the
+ // list-paths response.
+ DateTimeOffset lastModified = new DateTimeOffset();
+ if (lastModifiedString != null)
+ {
+ lastModified = DateTimeOffset.Parse(lastModifiedString, CultureInfo.InvariantCulture);
+ }
+
+ ETag eTag = new ETag();
+ if (etagString != null)
+ {
+ eTag = new ETag(etagString);
+ }
+
+ long contentLength = 0;
+ if (contentLengthString != null)
+ {
+ contentLength = long.Parse(contentLengthString, CultureInfo.InvariantCulture);
+ }
+
+ PathItem pathItem = new PathItem()
+ {
+ Name = name,
+ IsDirectory = isDirectory,
+ LastModified = lastModified,
+ ETag = eTag,
+ ContentLength = contentLength,
+ Owner = owner,
+ Group = group,
+ Permissions = permissions
+ };
+ return pathItem;
+ }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/PathUpdateResultExtensions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/PathUpdateResultExtensions.cs
new file mode 100644
index 000000000000..9af926bb4208
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/PathUpdateResultExtensions.cs
@@ -0,0 +1,49 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.Text;
+using Azure.Storage.Files.DataLake.Models;
+
+namespace Azure.Storage.Files.DataLake
+{
+ internal static class PathUpdateResultExtensions
+ {
+ // Maps the generated PathUpdateResult onto the public PathContentInfo model.
+ internal static PathContentInfo ToPathContentInfo(this PathUpdateResult pathUpdateResult) =>
+ new PathContentInfo()
+ {
+ ContentHash = pathUpdateResult.ContentMD5,
+ ETag = pathUpdateResult.ETag,
+ LastModified = pathUpdateResult.LastModified,
+ AcceptRanges = pathUpdateResult.AcceptRanges,
+ CacheControl = pathUpdateResult.CacheControl,
+ ContentDisposition = pathUpdateResult.ContentDisposition,
+ ContentEncoding = pathUpdateResult.ContentEncoding,
+ ContentLanguage = pathUpdateResult.ContentLanguage,
+ ContentLength = pathUpdateResult.ContentLength,
+ ContentRange = pathUpdateResult.ContentRange,
+ ContentType = pathUpdateResult.ContentType,
+ Metadata = ToMetadata(pathUpdateResult.Properties)
+ };
+
+ // Parses the raw properties string ("key1=base64Value,key2=base64Value")
+ // into a metadata dictionary; returns null when no properties were set.
+ private static IDictionary ToMetadata(string rawMetadata)
+ {
+ if (rawMetadata == null)
+ {
+ return null;
+ }
+
+ IDictionary metadataDictionary = new Dictionary();
+ string[] metadataArray = rawMetadata.Split(',');
+ foreach (string entry in metadataArray)
+ {
+ // Split on the FIRST '=' only: Base64-encoded values commonly end
+ // with '=' padding, which a plain Split('=') would strip, making
+ // Convert.FromBase64String throw on the truncated value.
+ string[] entryArray = entry.Split(new[] { '=' }, 2);
+ byte[] valueArray = Convert.FromBase64String(entryArray[1]);
+ metadataDictionary.Add(entryArray[0], Encoding.UTF8.GetString(valueArray));
+ }
+ return metadataDictionary;
+ }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Sas/DataLakeAccountSasPermissions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Sas/DataLakeAccountSasPermissions.cs
new file mode 100644
index 000000000000..329937b9c5ce
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Sas/DataLakeAccountSasPermissions.cs
@@ -0,0 +1,99 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Text;
+using Azure.Storage.Files.DataLake.Sas;
+
+namespace Azure.Storage.Files.DataLake.Sas
+{
+ ///
+ /// contains the list of
+ /// permissions that can be set for a data lake account's access policy. Use
+ ///
+ /// to set the permissions on the .
+ ///
+ [Flags]
+ public enum DataLakeAccountSasPermissions
+ {
+ ///
+ /// Indicates that Read is permitted.
+ ///
+ Read = 1,
+
+ ///
+ /// Indicates that Add is permitted.
+ ///
+ Add = 2,
+
+ ///
+ /// Indicates that Create is permitted.
+ ///
+ Create = 4,
+
+ ///
+ /// Indicates that Write is permitted.
+ ///
+ Write = 8,
+
+ ///
+ /// Indicates that Delete is permitted.
+ ///
+ Delete = 16,
+
+ ///
+ /// Indicates that List is permitted.
+ ///
+ List = 32,
+
+ ///
+ /// Indicates that all permissions are set.
+ ///
+ All = ~0
+ }
+}
+
+namespace Azure.Storage.Files.DataLake
+{
+ ///
+ /// Data Lake enum extensions
+ ///
+ internal static partial class DataLakeExtensions
+ {
+
+ ///
+ /// Create a permissions string to provide
+ /// .
+ ///
+ /// A permissions string.
+ internal static string ToPermissionsString(this DataLakeAccountSasPermissions permissions)
+ {
+ var sb = new StringBuilder();
+ if ((permissions & DataLakeAccountSasPermissions.Read) == DataLakeAccountSasPermissions.Read)
+ {
+ sb.Append(Constants.Sas.Permissions.Read);
+ }
+ if ((permissions & DataLakeAccountSasPermissions.Add) == DataLakeAccountSasPermissions.Add)
+ {
+ sb.Append(Constants.Sas.Permissions.Add);
+ }
+ if ((permissions & DataLakeAccountSasPermissions.Create) == DataLakeAccountSasPermissions.Create)
+ {
+ sb.Append(Constants.Sas.Permissions.Create);
+ }
+ if ((permissions & DataLakeAccountSasPermissions.Write) == DataLakeAccountSasPermissions.Write)
+ {
+ sb.Append(Constants.Sas.Permissions.Write);
+ }
+ if ((permissions & DataLakeAccountSasPermissions.Delete) == DataLakeAccountSasPermissions.Delete)
+ {
+ sb.Append(Constants.Sas.Permissions.Delete);
+ }
+ if ((permissions & DataLakeAccountSasPermissions.List) == DataLakeAccountSasPermissions.List)
+ {
+ sb.Append(Constants.Sas.Permissions.List);
+ }
+ return sb.ToString();
+ }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Sas/DataLakeFileSystemSasPermissions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Sas/DataLakeFileSystemSasPermissions.cs
new file mode 100644
index 000000000000..09d832a8e5ad
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Sas/DataLakeFileSystemSasPermissions.cs
@@ -0,0 +1,99 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.ComponentModel;
+using System.Text;
+using Azure.Storage.Files.DataLake.Sas;
+
+namespace Azure.Storage.Files.DataLake.Sas
+{
+ ///
+ /// contains the list of
+ /// permissions that can be set for a file system's access policy. Use
+ ///
+ /// to set the permissions on the .
+ ///
+ [Flags]
+ public enum DataLakeFileSystemSasPermissions
+ {
+ ///
+ /// Indicates that Read is permitted.
+ ///
+ Read = 1,
+
+ ///
+ /// Indicates that Add is permitted.
+ ///
+ Add = 2,
+
+ ///
+ /// Indicates that Create is permitted.
+ ///
+ Create = 4,
+
+ ///
+ /// Indicates that Write is permitted.
+ ///
+ Write = 8,
+
+ ///
+ /// Indicates that Delete is permitted.
+ ///
+ Delete = 16,
+
+ ///
+ /// Indicates that List is permitted.
+ ///
+ List = 32,
+
+ ///
+ /// Indicates that all permissions are set.
+ ///
+ All = ~0
+ }
+}
+
+namespace Azure.Storage.Files.DataLake
+{
+ ///
+ /// Data Lake enum extensions.
+ ///
+ internal static partial class DataLakeExtensions
+ {
+ ///
+ /// Create a permissions string to provide
+ /// .
+ ///
+ /// A permissions string.
+ internal static string ToPermissionsString(this DataLakeFileSystemSasPermissions permissions)
+ {
+ var sb = new StringBuilder();
+ if ((permissions & DataLakeFileSystemSasPermissions.Read) == DataLakeFileSystemSasPermissions.Read)
+ {
+ sb.Append(Constants.Sas.Permissions.Read);
+ }
+ if ((permissions & DataLakeFileSystemSasPermissions.Add) == DataLakeFileSystemSasPermissions.Add)
+ {
+ sb.Append(Constants.Sas.Permissions.Add);
+ }
+ if ((permissions & DataLakeFileSystemSasPermissions.Create) == DataLakeFileSystemSasPermissions.Create)
+ {
+ sb.Append(Constants.Sas.Permissions.Create);
+ }
+ if ((permissions & DataLakeFileSystemSasPermissions.Write) == DataLakeFileSystemSasPermissions.Write)
+ {
+ sb.Append(Constants.Sas.Permissions.Write);
+ }
+ if ((permissions & DataLakeFileSystemSasPermissions.Delete) == DataLakeFileSystemSasPermissions.Delete)
+ {
+ sb.Append(Constants.Sas.Permissions.Delete);
+ }
+ if ((permissions & DataLakeFileSystemSasPermissions.List) == DataLakeFileSystemSasPermissions.List)
+ {
+ sb.Append(Constants.Sas.Permissions.List);
+ }
+ return sb.ToString();
+ }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Sas/DataLakeSasBuilder.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Sas/DataLakeSasBuilder.cs
new file mode 100644
index 000000000000..e2c3ca38397e
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Sas/DataLakeSasBuilder.cs
@@ -0,0 +1,396 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.ComponentModel;
+using System.Security.Cryptography;
+using System.Text;
+using Azure.Storage.Files.DataLake.Models;
+using Azure.Storage.Sas;
+
+namespace Azure.Storage.Files.DataLake.Sas
+{
+ ///
+ /// is used to generate a Shared Access
+ /// Signature (SAS) for a Data Lake file system or path
+ /// For more information, see .
+ ///
+ public class DataLakeSasBuilder
+ {
+ ///
+ /// The storage service version to use to authenticate requests made
+ /// with this shared access signature, and the service version to use
+ /// when handling requests made with this shared access signature.
+ ///
+ public string Version { get; set; }
+
+ ///
+ /// The optional signed protocol field specifies the protocol
+ /// permitted for a request made with the SAS. Possible values are
+ /// ,
+ /// , and
+ /// .
+ ///
+ public SasProtocol Protocol { get; set; }
+
+ ///
+ /// Optionally specify the time at which the shared access signature
+ /// becomes valid. If omitted or set to DateTimeOffset.MinValue, the
+ /// start time is assumed to be the time when the storage service
+ /// receives the request.
+ ///
+ public DateTimeOffset StartsOn { get; set; }
+
+ ///
+ /// The time at which the shared access signature becomes invalid.
+ /// This field must be omitted if it has been specified in an
+ /// associated stored access policy.
+ ///
+ public DateTimeOffset ExpiresOn { get; set; }
+
+ ///
+ /// The permissions associated with the shared access signature. The
+ /// user is restricted to operations allowed by the permissions. This
+ /// field must be omitted if it has been specified in an associated
+ /// stored access policy. The ,
+ /// , ,
+ /// or can be used to create the
+ /// permissions string.
+ ///
+ public string Permissions { get; private set; }
+
+ ///
+ /// Specifies an IP address or a range of IP addresses from which to
+ /// accept requests. If the IP address from which the request
+ /// originates does not match the IP address or address range
+ /// specified on the SAS token, the request is not authenticated.
+ /// When specifying a range of IP addresses, note that the range is
+ /// inclusive.
+ ///
+ public SasIPRange IPRange { get; set; }
+
+ ///
+ /// An optional unique value up to 64 characters in length that
+ /// correlates to an access policy specified for the container.
+ ///
+ public string Identifier { get; set; }
+
+ ///
+ /// The name of the file system being made accessible.
+ ///
+ public string FileSystemName { get; set; }
+
+ ///
+ /// The name of the path being made accessible, or
+ /// for a file system SAS.
+ ///
+ public string Path { get; set; }
+
+ ///
+ /// Specifies which resources are accessible via the shared access
+ /// signature.
+ ///
+ /// Specify b if the shared resource is a blob. This grants access to
+ /// the content and metadata of the blob.
+ ///
+ /// Specify c if the shared resource is a blob container. This grants
+ /// access to the content and metadata of any blob in the container,
+ /// and to the list of blobs in the container.
+ ///
+ /// Beginning in version 2018-11-09, specify bs if the shared resource
+ /// is a blob snapshot. This grants access to the content and
+ /// metadata of the specific snapshot, but not the corresponding root
+ /// blob.
+ ///
+ public string Resource { get; set; }
+
+ ///
+ /// Override the value returned for Cache-Control response header.
+ ///
+ public string CacheControl { get; set; }
+
+ ///
+ /// Override the value returned for Content-Disposition response
+ /// header.
+ ///
+ public string ContentDisposition { get; set; }
+
+ ///
+ /// Override the value returned for Cache-Encoding response header.
+ ///
+ public string ContentEncoding { get; set; }
+
+ ///
+ /// Override the value returned for Cache-Language response header.
+ ///
+ public string ContentLanguage { get; set; }
+
+ ///
+ /// Override the value returned for Cache-Type response header.
+ ///
+ public string ContentType { get; set; }
+
+ ///
+ /// Sets the permissions for a file SAS.
+ ///
+ ///
+ /// containing the allowed permissions.
+ ///
+ public void SetPermissions(DataLakeSasPermissions permissions)
+ {
+ Permissions = permissions.ToPermissionsString();
+ }
+
+ ///
+ /// Sets the permissions for a path account level SAS.
+ ///
+ ///
+ /// containing the allowed permissions.
+ ///
+ public void SetPermissions(DataLakeAccountSasPermissions permissions)
+ {
+ Permissions = permissions.ToPermissionsString();
+ }
+
+ ///
+ /// Sets the permissions for a file system SAS.
+ ///
+ ///
+ /// containing the allowed permissions.
+ ///
+ public void SetPermissions(DataLakeFileSystemSasPermissions permissions)
+ {
+ Permissions = permissions.ToPermissionsString();
+ }
+
+ ///
+ /// Sets the permissions for the SAS using a raw permissions string.
+ ///
+ /// Raw permissions string for the SAS.
+ public void SetPermissions(string rawPermissions)
+ {
+ Permissions = rawPermissions;
+ }
+
+ ///
+ /// Use an account's to sign this
+ /// shared access signature values to produce the proper SAS query
+ /// parameters for authenticating requests.
+ ///
+ ///
+ /// The storage account's .
+ ///
+ ///
+ /// The used for authenticating
+ /// requests.
+ ///
+ public DataLakeSasQueryParameters ToSasQueryParameters(StorageSharedKeyCredential sharedKeyCredential)
+ {
+ sharedKeyCredential = sharedKeyCredential ?? throw Errors.ArgumentNull(nameof(sharedKeyCredential));
+
+ EnsureState();
+
+ var startTime = SasQueryParameters.FormatTimesForSasSigning(StartsOn);
+ var expiryTime = SasQueryParameters.FormatTimesForSasSigning(ExpiresOn);
+
+ // See http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
+ var stringToSign = String.Join("\n",
+ Permissions,
+ startTime,
+ expiryTime,
+ GetCanonicalName(sharedKeyCredential.AccountName, FileSystemName ?? String.Empty, Path ?? String.Empty),
+ Identifier,
+ IPRange.ToString(),
+ Protocol.ToProtocolString(),
+ Version,
+ Resource,
+ null, // snapshot
+ CacheControl,
+ ContentDisposition,
+ ContentEncoding,
+ ContentLanguage,
+ ContentType);
+
+ var signature = sharedKeyCredential.ComputeHMACSHA256(stringToSign);
+
+ var p = new DataLakeSasQueryParameters(
+ version: Version,
+ services: default,
+ resourceTypes: default,
+ protocol: Protocol,
+ startsOn: StartsOn,
+ expiresOn: ExpiresOn,
+ ipRange: IPRange,
+ identifier: Identifier,
+ resource: Resource,
+ permissions: Permissions,
+ signature: signature,
+ cacheControl: CacheControl,
+ contentDisposition: ContentDisposition,
+ contentEncoding: ContentEncoding,
+ contentLanguage: ContentLanguage,
+ contentType: ContentType);
+ return p;
+ }
+
+ ///
+ /// Use an account's to sign this
+ /// shared access signature values to produce the proper SAS query
+ /// parameters for authenticating requests.
+ ///
+ ///
+ /// A returned from
+ /// .
+ ///
+ /// The name of the storage account.
+ ///
+ /// The used for authenticating requests.
+ ///
+ public DataLakeSasQueryParameters ToSasQueryParameters(UserDelegationKey userDelegationKey, string accountName)
+ {
+ userDelegationKey = userDelegationKey ?? throw Errors.ArgumentNull(nameof(userDelegationKey));
+
+ EnsureState();
+
+ var startTime = SasQueryParameters.FormatTimesForSasSigning(StartsOn);
+ var expiryTime = SasQueryParameters.FormatTimesForSasSigning(ExpiresOn);
+ var signedStart = SasQueryParameters.FormatTimesForSasSigning(userDelegationKey.SignedStartsOn);
+ var signedExpiry = SasQueryParameters.FormatTimesForSasSigning(userDelegationKey.SignedExpiresOn);
+
+ // See http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
+ var stringToSign = String.Join("\n",
+ Permissions,
+ startTime,
+ expiryTime,
+ GetCanonicalName(accountName, FileSystemName ?? String.Empty, Path ?? String.Empty),
+ userDelegationKey.SignedObjectId,
+ userDelegationKey.SignedTenantId,
+ signedStart,
+ signedExpiry,
+ userDelegationKey.SignedService,
+ userDelegationKey.SignedVersion,
+ IPRange.ToString(),
+ Protocol.ToProtocolString(),
+ Version,
+ Resource,
+ null, // snapshot
+ CacheControl,
+ ContentDisposition,
+ ContentEncoding,
+ ContentLanguage,
+ ContentType);
+
+ var signature = ComputeHMACSHA256(userDelegationKey.Value, stringToSign);
+
+ var p = new DataLakeSasQueryParameters(
+ version: Version,
+ services: default,
+ resourceTypes: default,
+ protocol: Protocol,
+ startsOn: StartsOn,
+ expiresOn: ExpiresOn,
+ ipRange: IPRange,
+ identifier: null,
+ resource: Resource,
+ permissions: Permissions,
+ keyOid: userDelegationKey.SignedObjectId,
+ keyTid: userDelegationKey.SignedTenantId,
+ keyStart: userDelegationKey.SignedStartsOn,
+ keyExpiry: userDelegationKey.SignedExpiresOn,
+ keyService: userDelegationKey.SignedService,
+ keyVersion: userDelegationKey.SignedVersion,
+ signature: signature,
+ cacheControl: CacheControl,
+ contentDisposition: ContentDisposition,
+ contentEncoding: ContentEncoding,
+ contentLanguage: ContentLanguage,
+ contentType: ContentType);
+ return p;
+ }
+
+ ///
+ /// Computes the canonical name for a container or blob resource for SAS signing.
+ /// Container: "/blob/account/containername"
+ /// Blob: "/blob/account/containername/blobname"
+ ///
+ /// The name of the storage account.
+ /// The name of the container.
+ /// The name of the blob.
+ /// The canonical resource name.
+ private static string GetCanonicalName(string account, string fileSystemName, string path)
+ => !String.IsNullOrEmpty(path)
+ ? $"/blob/{account}/{fileSystemName}/{path.Replace("\\", "/")}"
+ : $"/blob/{account}/{fileSystemName}";
+
+ ///
+ /// ComputeHMACSHA256 generates a base-64 hash signature string for an
+ /// HTTP request or for a SAS.
+ ///
+ ///
+ /// A used to sign with a key
+ /// representing AD credentials.
+ ///
+ /// The message to sign.
+ /// The signed message.
+ private static string ComputeHMACSHA256(string userDelegationKeyValue, string message) =>
+ Convert.ToBase64String(
+ new HMACSHA256(
+ Convert.FromBase64String(userDelegationKeyValue))
+ .ComputeHash(Encoding.UTF8.GetBytes(message)));
+
+ ///
+ /// Ensure the 's properties are in a
+ /// consistent state.
+ ///
+ private void EnsureState()
+ {
+ if (ExpiresOn == default)
+ {
+ throw Errors.SasMissingData(nameof(ExpiresOn));
+ }
+ if (string.IsNullOrEmpty(Permissions))
+ {
+ throw Errors.SasMissingData(nameof(Permissions));
+ }
+ // File System
+ if (String.IsNullOrEmpty(Path))
+ {
+ Resource = Constants.Sas.Resource.Container;
+ }
+
+ // Path
+ else
+ {
+ Resource = Constants.Sas.Resource.Blob;
+ }
+ if (String.IsNullOrEmpty(Version))
+ {
+ Version = SasQueryParameters.DefaultSasVersion;
+ }
+ }
+
+ ///
+ /// Returns a string that represents the current object.
+ ///
+ /// A string that represents the current object.
+ [EditorBrowsable(EditorBrowsableState.Never)]
+ public override string ToString() =>
+ base.ToString();
+
+ ///
+ /// Check if two BlobSasBuilder instances are equal.
+ ///
+ /// The instance to compare to.
+ /// True if they're equal, false otherwise.
+ [EditorBrowsable(EditorBrowsableState.Never)]
+ public override bool Equals(object obj)
+ => base.Equals(obj);
+
+ ///
+ /// Get a hash code for the BlobSasBuilder.
+ ///
+ /// Hash code for the BlobSasBuilder.
+ [EditorBrowsable(EditorBrowsableState.Never)]
+ public override int GetHashCode() => base.GetHashCode();
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Sas/DataLakeSasPermissions.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Sas/DataLakeSasPermissions.cs
new file mode 100644
index 000000000000..546bd4ef8db9
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Sas/DataLakeSasPermissions.cs
@@ -0,0 +1,91 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.ComponentModel;
+using System.Text;
+using Azure.Storage.Files.DataLake.Sas;
+
+namespace Azure.Storage.Files.DataLake.Sas
+{
+ ///
+ /// contains the list of
+ /// permissions that can be set for a blob's access policy. Use
+ ///
+ /// to set the permissions on the .
+ ///
+ [Flags]
+ public enum DataLakeSasPermissions
+ {
+ ///
+ /// Indicates that Read is permitted.
+ ///
+ Read = 1,
+
+ ///
+ /// Indicates that Add is permitted.
+ ///
+ Add = 2,
+
+ ///
+ /// Indicates that Create is permitted.
+ ///
+ Create = 4,
+
+ ///
+ /// Indicates that Write is permitted.
+ ///
+ Write = 8,
+
+ ///
+ /// Indicates that Delete is permitted.
+ ///
+ Delete = 16,
+
+ ///
+ /// Indicates that all permissions are set.
+ ///
+ All = ~0
+ }
+}
+
+namespace Azure.Storage.Files.DataLake
+{
+ ///
+ /// Data Lake enum extensions
+ ///
+ internal static partial class DataLakeExtensions
+ {
+
+ ///
+ /// Create a permissions string to provide
+ /// .
+ ///
+ /// A permissions string.
+ internal static string ToPermissionsString(this DataLakeSasPermissions permissions)
+ {
+ var sb = new StringBuilder();
+ if ((permissions & DataLakeSasPermissions.Read) == DataLakeSasPermissions.Read)
+ {
+ sb.Append(Constants.Sas.Permissions.Read);
+ }
+ if ((permissions & DataLakeSasPermissions.Add) == DataLakeSasPermissions.Add)
+ {
+ sb.Append(Constants.Sas.Permissions.Add);
+ }
+ if ((permissions & DataLakeSasPermissions.Create) == DataLakeSasPermissions.Create)
+ {
+ sb.Append(Constants.Sas.Permissions.Create);
+ }
+ if ((permissions & DataLakeSasPermissions.Write) == DataLakeSasPermissions.Write)
+ {
+ sb.Append(Constants.Sas.Permissions.Write);
+ }
+ if ((permissions & DataLakeSasPermissions.Delete) == DataLakeSasPermissions.Delete)
+ {
+ sb.Append(Constants.Sas.Permissions.Delete);
+ }
+ return sb.ToString();
+ }
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/src/Sas/DataLakeSasQueryParameters.cs b/sdk/storage/Azure.Storage.Files.DataLake/src/Sas/DataLakeSasQueryParameters.cs
new file mode 100644
index 000000000000..1fc3009297ad
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/src/Sas/DataLakeSasQueryParameters.cs
@@ -0,0 +1,135 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using Azure.Storage.Sas;
+
+namespace Azure.Storage.Files.DataLake.Sas
+{
+ ///
+ /// A object represents the components
+ /// that make up an Azure Storage Shared Access Signature's query
+ /// parameters. You can construct a new instance using
+ /// .
+ ///
+ /// For more information, .
+ ///
+ public sealed class DataLakeSasQueryParameters : SasQueryParameters
+ {
+ ///
+ /// Gets the Azure Active Directory object ID in GUID format.
+ ///
+ public string KeyObjectId => _keyObjectId;
+
+ ///
+ /// Gets the Azure Active Directory tenant ID in GUID format
+ ///
+ public string KeyTenantId => _keyTenantId;
+
+ ///
+ /// Gets the time at which the key becomes valid.
+ ///
+ public DateTimeOffset KeyStart => _keyStart;
+
+ ///
+ /// Gets the time at which the key becomes expires.
+ ///
+ public DateTimeOffset KeyExpiry => _keyExpiry;
+
+ ///
+ /// Gets the Storage service that accepts the key.
+ ///
+ public string KeyService => _keyService;
+
+ ///
+ /// Gets the Storage service version that created the key.
+ ///
+ public string KeyVersion => _keyVersion;
+
+ ///
+ /// Gets empty shared access signature query parameters.
+ ///
+ public static new DataLakeSasQueryParameters Empty => new DataLakeSasQueryParameters();
+
+ internal DataLakeSasQueryParameters()
+ : base()
+ {
+ }
+
+ ///
+ /// Creates a new instance of the
+ /// type.
+ ///
+ /// Expects decoded values.
+ ///
+ internal DataLakeSasQueryParameters(
+ string version,
+ AccountSasServices? services,
+ AccountSasResourceTypes? resourceTypes,
+ SasProtocol protocol,
+ DateTimeOffset startsOn,
+ DateTimeOffset expiresOn,
+ SasIPRange ipRange,
+ string identifier,
+ string resource,
+ string permissions,
+ string signature,
+ string keyOid = default,
+ string keyTid = default,
+ DateTimeOffset keyStart = default,
+ DateTimeOffset keyExpiry = default,
+ string keyService = default,
+ string keyVersion = default,
+ string cacheControl = default,
+ string contentDisposition = default,
+ string contentEncoding = default,
+ string contentLanguage = default,
+ string contentType = default)
+ : base(
+ version,
+ services,
+ resourceTypes,
+ protocol,
+ startsOn,
+ expiresOn,
+ ipRange,
+ identifier,
+ resource,
+ permissions,
+ signature,
+ keyOid,
+ keyTid,
+ keyStart,
+ keyExpiry,
+ keyService,
+ keyVersion,
+ cacheControl,
+ contentDisposition,
+ contentEncoding,
+ contentLanguage,
+ contentType)
+ {
+ }
+
+ ///
+ /// Creates a new instance of the
+ /// type based on the supplied query parameters .
+ /// All SAS-related query parameters will be removed from
+ /// .
+ ///
+ /// URI query parameters
+ internal DataLakeSasQueryParameters(UriQueryParamsCollection values)
+ : base(values, includeBlobParameters: true)
+ {
+ }
+
+ ///
+ /// Convert the SAS query parameters into a URL encoded query string.
+ ///
+ ///
+ /// A URL encoded query string representing the SAS.
+ ///
+ public override string ToString() =>
+ Encode(includeBlobParameters: true);
+ }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/swagger/readme.md b/sdk/storage/Azure.Storage.Files.DataLake/swagger/readme.md
index cb465ebc0ba1..4334b6d99aa9 100644
--- a/sdk/storage/Azure.Storage.Files.DataLake/swagger/readme.md
+++ b/sdk/storage/Azure.Storage.Files.DataLake/swagger/readme.md
@@ -27,7 +27,7 @@ directive:
$.namespace = "Azure.Storage.Files.DataLake";
$["client-name"] = "DataLakeRestClient";
$["client-extensions-name"] = "FilesDataLakeExtensions";
- $["client-model-factory-name"] = "FilesDataLakeModelFactory";
+ $["client-model-factory-name"] = "DataLakeModelFactory";
$["x-az-skip-path-components"] = true;
$["x-az-include-sync-methods"] = true;
$["x-az-public"] = false;
@@ -67,10 +67,10 @@ directive:
- from: swagger-document
where: $["x-ms-paths"]..responses.default
transform: >
+ delete $.headers;
$["x-az-response-name"] = "StorageErrorResult";
$["x-az-create-exception"] = true;
$["x-az-public"] = false;
- $.headers["x-ms-error-code"]["x-az-demote-header"] = true;
```
### ApiVersionParameter
@@ -95,7 +95,16 @@ directive:
transform: return "rangeGetContentHash";
```
-### StorageError
+### ErrorCode
+``` yaml
+directive:
+- from: swagger-document
+ where: $.definitions.ErrorCode
+ transform: >
+ $["x-ms-enum"].name = "DataLakeErrorCode";
+```
+
+### Hide StorageError
``` yaml
directive:
- from: swagger-document
@@ -103,4 +112,114 @@ directive:
transform: >
$["x-az-public"] = false;
$.properties.Code = { "type": "string" };
+- from: swagger-document
+ where: $.definitions.DataLakeStorageError
+ transform: >
+ $["x-az-public"] = false;
+```
+
+### Remove extra consumes/produces values
+To avoid an arbitrary limitation in our generator
+``` yaml
+directive:
+- from: swagger-document
+ where: $.consumes
+ transform: >
+ return ["application/xml"];
+- from: swagger-document
+ where: $.produces
+ transform: >
+ return ["application/xml"];
```
+
+### Temporarily work around proper JSON support for file permissions
+``` yaml
+directive:
+- from: swagger-document
+ where: $["x-ms-paths"]["/{filesystem}?resource=filesystem"]
+ transform: >
+ delete $.get.responses["200"].schema;
+ $.get.responses["200"].schema = {
+ "type": "object",
+ "format": "file"
+ };
+ $.get.responses["200"]["x-az-public"] = false;
+- from: swagger-document
+ where: $.definitions.StorageError
+ transform: >
+ $.type = "string";
+ delete $.properties;
+```
+
+### /{filesystem}
+``` yaml
+directive:
+- from: swagger-document
+ where: $["x-ms-paths"]["/{filesystem}"]
+ transform: >
+ $.put.responses["201"]["x-az-public"] = false;
+ $.head.responses["200"]["x-az-public"] = false;
+ $.patch.responses["200"]["x-az-public"] = false;
+```
+
+### /
+``` yaml
+directive:
+- from: swagger-document
+ where: $["x-ms-paths"]["/"]
+ transform: >
+ $.get.responses["200"]["x-az-public"] = false;
+```
+
+### /{filesystem}/{path}
+``` yaml
+directive:
+- from: swagger-document
+ where: $["x-ms-paths"]["/{filesystem}/{path}"]
+ transform: >
+ $.put.responses["201"]["x-az-public"] = false;
+ $.delete.responses["200"]["x-az-public"] = false;
+ $.head.responses["200"]["x-az-public"] = false;
+ $.post.responses["200"]["x-az-public"] = false;
+ $.get.responses["200"]["x-az-public"] = false;
+ $.patch.responses["200"]["x-az-public"] = false;
+```
+
+### /{filesystem}/{path}?action=append
+``` yaml
+directive:
+- from: swagger-document
+ where: $["x-ms-paths"]["/{filesystem}/{path}?action=append"]
+ transform: >
+ $.patch.responses["202"]["x-az-public"] = false;
+```
+
+### /{filesystem}/{path}?action=flush
+``` yaml
+directive:
+- from: swagger-document
+ where: $["x-ms-paths"]["/{filesystem}/{path}?action=flush"]
+ transform: >
+ $.patch.responses["200"]["x-az-public"] = false;
+```
+
+### /{filesystem}/{path}?action=setAccessControl
+``` yaml
+directive:
+- from: swagger-document
+ where: $["x-ms-paths"]["/{filesystem}/{path}?action=setAccessControl"]
+ transform: >
+ $.patch.responses["200"]["x-az-public"] = false;
+```
+
+### Hide FileSystemList/FileSystem/PathList/Path
+``` yaml
+directive:
+- from: swagger-document
+ where: $.definitions
+ transform: >
+ $.FileSystem["x-az-public"] = false;
+ $.FileSystemList["x-az-public"] = false;
+ $.PathList["x-az-public"] = false;
+ $.Path["x-az-public"] = false;
+```
\ No newline at end of file
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeTestBase.cs b/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeTestBase.cs
new file mode 100644
index 000000000000..2898376966b2
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/tests/DataLakeTestBase.cs
@@ -0,0 +1,368 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Net;
+using System.Threading.Tasks;
+using Azure.Core;
+using Azure.Core.Pipeline;
+using Azure.Core.Testing;
+using Azure.Storage.Files.DataLake.Models;
+using Azure.Storage.Files.DataLake.Sas;
+using Azure.Storage.Sas;
+using Azure.Storage.Test;
+using Azure.Storage.Test.Shared;
+using NUnit.Framework;
+
+namespace Azure.Storage.Files.DataLake.Tests
+{
    /// <summary>
    /// Base class for Data Lake test fixtures: provides recorded client
    /// construction, name/lease generators, SAS credential builders, and
    /// disposable file system setup helpers for tests.
    /// </summary>
    /// <remarks>
    /// NOTE(review): generic type arguments (e.g. on IDictionary, Task,
    /// Response, Dictionary) appear to have been stripped from this capture
    /// of the file — confirm against the original source.
    /// </remarks>
    public abstract class DataLakeTestBase : StorageTestBase
    {
        // Sentinel values tests use to request a freshly-fetched ETag/lease
        // ("Received*") or a deliberately invalid one ("Garbage*").
        public readonly string ReceivedETag = "\"received\"";
        public readonly string GarbageETag = "\"garbage\"";
        public readonly string ReceivedLeaseId = "received";
        // Canned HTTP header values asserted by header round-trip tests.
        public readonly string CacheControl = "control";
        public readonly string ContentDisposition = "disposition";
        public readonly string ContentEncoding = "encoding";
        public readonly string ContentLanguage = "language";
        public readonly string ContentType = "type";

        public DataLakeTestBase(bool async) : this(async, null) { }

        public DataLakeTestBase(bool async, RecordedTestMode? mode = null)
            : base(async, mode)
        {
        }

        // Dates relative to the recording clock so playback stays deterministic.
        public DateTimeOffset OldDate => Recording.Now.AddDays(-1);
        public DateTimeOffset NewDate => Recording.Now.AddDays(1);
        // Recorded-random names so live runs don't collide and playback matches.
        public string GetGarbageLeaseId() => Recording.Random.NewGuid().ToString();
        public string GetNewFileSystemName() => $"test-filesystem-{Recording.Random.NewGuid()}";
        public string GetNewDirectoryName() => $"test-directory-{Recording.Random.NewGuid()}";
        public string GetNewFileName() => $"test-file-{Recording.Random.NewGuid()}";

        /// <summary>
        /// Builds client options with retry delays tuned for the current
        /// record/playback mode and instruments them for recording.
        /// </summary>
        public DataLakeClientOptions GetOptions(bool parallelRange = false)
        {
            var options = new DataLakeClientOptions
            {
                Diagnostics = { IsLoggingEnabled = true },
                Retry =
                {
                    Mode = RetryMode.Exponential,
                    MaxRetries = Constants.MaxReliabilityRetries,
                    // Near-zero delays in playback; realistic back-off live.
                    Delay = TimeSpan.FromSeconds(Mode == RecordedTestMode.Playback ? 0.01 : 0.5),
                    MaxDelay = TimeSpan.FromSeconds(Mode == RecordedTestMode.Playback ? 0.1 : 10)
                }
            };
            if (Mode != RecordedTestMode.Live)
            {
                // Replay recorded client request IDs so playback matching succeeds.
                options.AddPolicy(new RecordedClientRequestIdPolicy(Recording, parallelRange), HttpPipelinePosition.PerCall);
            }

            return Recording.InstrumentClientOptions(options);
        }

        /// <summary>
        /// Creates an instrumented service client authenticated with the
        /// tenant's shared key.
        /// </summary>
        public DataLakeServiceClient GetServiceClientFromSharedKeyConfig(TenantConfiguration config)
            => InstrumentClient(
                new DataLakeServiceClient(
                    new Uri(config.BlobServiceEndpoint),
                    new StorageSharedKeyCredential(config.AccountName, config.AccountKey),
                    GetOptions()));

        /// <summary>
        /// Creates an instrumented service client authenticated with an
        /// OAuth token; forces HTTPS as required for AAD tokens.
        /// </summary>
        public DataLakeServiceClient GetServiceClientFromOauthConfig(TenantConfiguration config)
            => InstrumentClient(
                new DataLakeServiceClient(
                    (new Uri(config.BlobServiceEndpoint)).ToHttps(),
                    GetOAuthCredential(config),
                    GetOptions()));

        public DataLakeServiceClient GetServiceClient_SharedKey()
            => GetServiceClientFromSharedKeyConfig(TestConfigHierarchicalNamespace);

        public DataLakeServiceClient GetServiceClient_OAuth()
            => GetServiceClientFromOauthConfig(TestConfigHierarchicalNamespace);


        /// <summary>
        /// Creates a new file system (deleted when the returned IDisposable
        /// is disposed) and hands back an instrumented client for it.
        /// </summary>
        public IDisposable GetNewFileSystem(
            out FileSystemClient fileSystem,
            DataLakeServiceClient service = default,
            string fileSystemName = default,
            IDictionary metadata = default,
            Models.PublicAccessType publicAccessType = Models.PublicAccessType.None,
            bool premium = default)
        {
            fileSystemName ??= GetNewFileSystemName();
            service ??= GetServiceClient_SharedKey();
            fileSystem = InstrumentClient(service.GetFileSystemClient(fileSystemName));

            if (publicAccessType == Models.PublicAccessType.None)
            {
                // Premium accounts do not get public access by default;
                // otherwise default to Container so read tests can succeed.
                publicAccessType = premium ? Models.PublicAccessType.None : Models.PublicAccessType.Container;
            }

            return new DisposingFileSystem(
                fileSystem,
                metadata ?? new Dictionary(StringComparer.OrdinalIgnoreCase),
                publicAccessType);
        }

        /// <summary>
        /// Creates a file system plus a directory inside it; disposing the
        /// returned handle deletes the file system.
        /// </summary>
        public IDisposable GetNewDirectory(out DirectoryClient directory, DataLakeServiceClient service = default, string fileSystemName = default, string directoryName = default)
        {
            IDisposable disposingFileSystem = GetNewFileSystem(out FileSystemClient fileSystem, service, fileSystemName);
            directory = InstrumentClient(fileSystem.GetDirectoryClient(directoryName ?? GetNewDirectoryName()));
            // NOTE(review): sync-over-async (.Result) — out parameters rule
            // out making this helper async.
            _ = directory.CreateAsync().Result;
            return disposingFileSystem;
        }

        /// <summary>
        /// Creates a file system, a directory, and a file inside it;
        /// disposing the returned handle deletes the file system.
        /// </summary>
        public IDisposable GetNewFile(out FileClient file, DataLakeServiceClient service = default, string fileSystemName = default, string directoryName = default, string fileName = default)
        {
            IDisposable disposingFileSystem = GetNewFileSystem(out FileSystemClient fileSystem, service, fileSystemName);
            DirectoryClient directory = InstrumentClient(fileSystem.GetDirectoryClient(directoryName ?? GetNewDirectoryName()));
            // NOTE(review): sync-over-async (.Result) as in GetNewDirectory.
            _ = directory.CreateAsync().Result;

            file = InstrumentClient(directory.GetFileClient(fileName ?? GetNewFileName()));
            _ = file.CreateAsync().Result;

            return disposingFileSystem;
        }

        /// <summary>
        /// Asserts the path info carries the ETag and LastModified values
        /// every successful path operation should return.
        /// </summary>
        public static void AssertValidStoragePathInfo(PathInfo pathInfo)
        {
            Assert.IsNotNull(pathInfo.ETag);
            Assert.IsNotNull(pathInfo.LastModified);
        }

        /// <summary>
        /// Asserts two metadata collections are equivalent, allowing for the
        /// one extra entry the service adds on directories.
        /// </summary>
        public void AssertMetadataEquality(
            IDictionary expected,
            IDictionary actual,
            bool isDirectory)
        {
            Assert.IsNotNull(expected, "Expected metadata is null");
            Assert.IsNotNull(actual, "Actual metadata is null");

            if (isDirectory)
            {
                // Directories carry one service-added metadata entry
                // (presumably the is-folder marker — confirm against service docs).
                Assert.AreEqual(expected.Count + 1, actual.Count, "Metadata counts are not equal");
            }
            else
            {
                Assert.AreEqual(expected.Count, actual.Count, "Metadata counts are not equal");
            }


            foreach (KeyValuePair kvp in expected)
            {
                // Metadata keys/values are compared case-insensitively.
                if (!actual.TryGetValue(kvp.Key, out var value) ||
                    string.Compare(kvp.Value, value, StringComparison.OrdinalIgnoreCase) != 0)
                {
                    Assert.Fail($"Expected key <{kvp.Key}> with value <{kvp.Value}> not found");
                }
            }
        }

        /// <summary>
        /// Service client whose URI carries an account SAS.
        /// </summary>
        public DataLakeServiceClient GetServiceClient_AccountSas(
            StorageSharedKeyCredential sharedKeyCredentials = default,
            DataLakeSasQueryParameters sasCredentials = default)
            => InstrumentClient(
                new DataLakeServiceClient(
                    new Uri($"{TestConfigHierarchicalNamespace.BlobServiceEndpoint}?{sasCredentials ?? GetNewAccountSasCredentials(sharedKeyCredentials ?? GetNewSharedKeyCredentials())}"),
                    GetOptions()));

        /// <summary>
        /// Service client whose URI carries a file-system-scoped service SAS.
        /// </summary>
        public DataLakeServiceClient GetServiceClient_DataLakeServiceSas_FileSystem(
            string fileSystemName,
            StorageSharedKeyCredential sharedKeyCredentials = default,
            DataLakeSasQueryParameters sasCredentials = default)
            => InstrumentClient(
                new DataLakeServiceClient(
                    new Uri($"{TestConfigHierarchicalNamespace.BlobServiceEndpoint}?{sasCredentials ?? GetNewDataLakeServiceSasCredentialsFileSystem(fileSystemName: fileSystemName, sharedKeyCredentials: sharedKeyCredentials ?? GetNewSharedKeyCredentials())}"),
                    GetOptions()));

        /// <summary>
        /// Service client whose URI carries a file-system-scoped identity
        /// (user delegation) SAS; forced to HTTPS.
        /// </summary>
        public DataLakeServiceClient GetServiceClient_DataLakeServiceIdentitySas_FileSystem(
            string fileSystemName,
            UserDelegationKey userDelegationKey,
            DataLakeSasQueryParameters sasCredentials = default)
            => InstrumentClient(
                new DataLakeServiceClient(
                    (new Uri($"{TestConfigHierarchicalNamespace.BlobServiceEndpoint}?{sasCredentials ?? GetNewDataLakeServiceIdentitySasCredentialsFileSystem(fileSystemName: fileSystemName, userDelegationKey, TestConfigHierarchicalNamespace.AccountName)}")).ToHttps(),
                    GetOptions()));

        /// <summary>
        /// Service client whose URI carries a path-scoped service SAS.
        /// </summary>
        public DataLakeServiceClient GetServiceClient_DataLakeServiceSas_Path(
            string fileSystemName,
            string path,
            StorageSharedKeyCredential sharedKeyCredentials = default,
            DataLakeSasQueryParameters sasCredentials = default)
            => InstrumentClient(
                new DataLakeServiceClient(
                    new Uri($"{TestConfigHierarchicalNamespace.BlobServiceEndpoint}?{sasCredentials ?? GetNewDataLakeServiceSasCredentialsPath(fileSystemName: fileSystemName, path: path, sharedKeyCredentials: sharedKeyCredentials ?? GetNewSharedKeyCredentials())}"),
                    GetOptions()));

        /// <summary>
        /// Service client whose URI carries a path-scoped identity
        /// (user delegation) SAS; forced to HTTPS.
        /// </summary>
        public DataLakeServiceClient GetServiceClient_DataLakeServiceIdentitySas_Path(
            string fileSystemName,
            string path,
            UserDelegationKey userDelegationKey,
            DataLakeSasQueryParameters sasCredentials = default)
            => InstrumentClient(
                new DataLakeServiceClient(
                    (new Uri($"{TestConfigHierarchicalNamespace.BlobServiceEndpoint}?{sasCredentials ?? GetNewDataLakeServiceIdentitySasCredentialsPath(fileSystemName: fileSystemName, path: path, userDelegationKey: userDelegationKey, accountName: TestConfigHierarchicalNamespace.AccountName)}")).ToHttps(),
                    GetOptions()));


        public StorageSharedKeyCredential GetNewSharedKeyCredentials()
            => new StorageSharedKeyCredential(
                TestConfigHierarchicalNamespace.AccountName,
                TestConfigHierarchicalNamespace.AccountKey);

        /// <summary>
        /// Builds an account SAS covering blobs with broad permissions and a
        /// two-hour validity window around "now".
        /// </summary>
        public SasQueryParameters GetNewAccountSasCredentials(StorageSharedKeyCredential sharedKeyCredentials = default)
        {
            var builder = new AccountSasBuilder
            {
                Protocol = SasProtocol.None,
                Services = AccountSasServices.Blobs,
                ResourceTypes = AccountSasResourceTypes.Container | AccountSasResourceTypes.Object,
                StartsOn = Recording.UtcNow.AddHours(-1),
                ExpiresOn = Recording.UtcNow.AddHours(+1),
                IPRange = new SasIPRange(IPAddress.None, IPAddress.None)
            };
            builder.SetPermissions(
                AccountSasPermissions.Read |
                AccountSasPermissions.Add |
                AccountSasPermissions.Create |
                AccountSasPermissions.Write |
                AccountSasPermissions.Delete |
                AccountSasPermissions.List);
            return builder.ToSasQueryParameters(sharedKeyCredentials);
        }

        /// <summary>
        /// Builds a file-system-scoped service SAS signed with a shared key.
        /// </summary>
        public DataLakeSasQueryParameters GetNewDataLakeServiceSasCredentialsFileSystem(string fileSystemName, StorageSharedKeyCredential sharedKeyCredentials = default)
        {
            var builder = new DataLakeSasBuilder
            {
                FileSystemName = fileSystemName,
                Protocol = SasProtocol.None,
                StartsOn = Recording.UtcNow.AddHours(-1),
                ExpiresOn = Recording.UtcNow.AddHours(+1),
                IPRange = new SasIPRange(IPAddress.None, IPAddress.None)
            };
            builder.SetPermissions(DataLakeFileSystemSasPermissions.All);
            return builder.ToSasQueryParameters(sharedKeyCredentials ?? GetNewSharedKeyCredentials());
        }

        /// <summary>
        /// Builds a file-system-scoped SAS signed with a user delegation key.
        /// </summary>
        public DataLakeSasQueryParameters GetNewDataLakeServiceIdentitySasCredentialsFileSystem(string fileSystemName, UserDelegationKey userDelegationKey, string accountName)
        {
            var builder = new DataLakeSasBuilder
            {
                FileSystemName = fileSystemName,
                Protocol = SasProtocol.None,
                StartsOn = Recording.UtcNow.AddHours(-1),
                ExpiresOn = Recording.UtcNow.AddHours(+1),
                IPRange = new SasIPRange(IPAddress.None, IPAddress.None)
            };
            builder.SetPermissions(DataLakeFileSystemSasPermissions.All);
            return builder.ToSasQueryParameters(userDelegationKey, accountName);
        }

        /// <summary>
        /// Builds a path-scoped service SAS signed with a shared key.
        /// </summary>
        public DataLakeSasQueryParameters GetNewDataLakeServiceSasCredentialsPath(string fileSystemName, string path, StorageSharedKeyCredential sharedKeyCredentials = default)
        {
            var builder = new DataLakeSasBuilder
            {
                FileSystemName = fileSystemName,
                Path = path,
                Protocol = SasProtocol.None,
                StartsOn = Recording.UtcNow.AddHours(-1),
                ExpiresOn = Recording.UtcNow.AddHours(+1),
                IPRange = new SasIPRange(IPAddress.None, IPAddress.None)
            };
            builder.SetPermissions(
                DataLakeSasPermissions.Read |
                DataLakeSasPermissions.Add |
                DataLakeSasPermissions.Create |
                DataLakeSasPermissions.Delete |
                DataLakeSasPermissions.Write);
            return builder.ToSasQueryParameters(sharedKeyCredentials ?? GetNewSharedKeyCredentials());
        }

        /// <summary>
        /// Builds a path-scoped SAS signed with a user delegation key.
        /// </summary>
        public DataLakeSasQueryParameters GetNewDataLakeServiceIdentitySasCredentialsPath(string fileSystemName, string path, UserDelegationKey userDelegationKey, string accountName)
        {
            var builder = new DataLakeSasBuilder
            {
                FileSystemName = fileSystemName,
                Path = path,
                Protocol = SasProtocol.None,
                StartsOn = Recording.UtcNow.AddHours(-1),
                ExpiresOn = Recording.UtcNow.AddHours(+1),
                IPRange = new SasIPRange(IPAddress.None, IPAddress.None)
            };
            builder.SetPermissions(
                DataLakeSasPermissions.Read |
                DataLakeSasPermissions.Add |
                DataLakeSasPermissions.Create |
                DataLakeSasPermissions.Delete |
                DataLakeSasPermissions.Write);
            return builder.ToSasQueryParameters(userDelegationKey, accountName);
        }

        /// <summary>
        /// Resolves the ReceivedETag sentinel to the path's real ETag;
        /// other values pass through unchanged.
        /// </summary>
        //TODO consider removing this.
        public async Task SetupPathMatchCondition(PathClient path, string match)
        {
            if (match == ReceivedETag)
            {
                Response headers = await path.GetPropertiesAsync();
                return headers.Value.ETag.ToString();
            }
            else
            {
                return match;
            }
        }

        /// <summary>
        /// Acquires an infinite lease when the test asked for a real lease
        /// id; resolves the ReceivedLeaseId sentinel to the acquired id.
        /// </summary>
        //TODO consider removing this.
        public async Task SetupPathLeaseCondition(PathClient path, string leaseId, string garbageLeaseId)
        {
            Models.DataLakeLease lease = null;
            if (leaseId == ReceivedLeaseId || leaseId == garbageLeaseId)
            {
                lease = await InstrumentClient(path.GetLeaseClient(Recording.Random.NewGuid().ToString())).AcquireAsync(DataLakeLeaseClient.InfiniteLeaseDuration);
            }
            return leaseId == ReceivedLeaseId ? lease.LeaseId : leaseId;
        }

        /// <summary>
        /// Same as SetupPathLeaseCondition, but for file system leases.
        /// </summary>
        //TODO consider removing this.
        public async Task SetupFileSystemLeaseCondition(FileSystemClient fileSystem, string leaseId, string garbageLeaseId)
        {
            Models.DataLakeLease lease = null;
            if (leaseId == ReceivedLeaseId || leaseId == garbageLeaseId)
            {
                lease = await InstrumentClient(fileSystem.GetLeaseClient(Recording.Random.NewGuid().ToString())).AcquireAsync(DataLakeLeaseClient.InfiniteLeaseDuration);
            }
            return leaseId == ReceivedLeaseId ? lease.LeaseId : leaseId;
        }

        /// <summary>
        /// Creates a file system on construction and deletes it on Dispose,
        /// so tests can scope service resources with a using block.
        /// </summary>
        private class DisposingFileSystem : IDisposable
        {
            public FileSystemClient FileSystemClient { get; }

            public DisposingFileSystem(FileSystemClient fileSystem, IDictionary metadata, Models.PublicAccessType publicAccessType = default)
            {
                // NOTE(review): sync-over-async (.Wait()) — constructors cannot await.
                fileSystem.CreateAsync(metadata: metadata, publicAccessType: publicAccessType).Wait();

                FileSystemClient = fileSystem;
            }

            public void Dispose()
            {
                if (FileSystemClient != null)
                {
                    try
                    {
                        FileSystemClient.DeleteAsync().Wait();
                    }
                    catch
                    {
                        // swallow the exception to avoid hiding another test failure
                    }
                }
            }
        }
    }
+}
diff --git a/sdk/storage/Azure.Storage.Files.DataLake/tests/DirectoryClientTests.cs b/sdk/storage/Azure.Storage.Files.DataLake/tests/DirectoryClientTests.cs
new file mode 100644
index 000000000000..7608a6a3c795
--- /dev/null
+++ b/sdk/storage/Azure.Storage.Files.DataLake/tests/DirectoryClientTests.cs
@@ -0,0 +1,1957 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Security.Cryptography;
+using System.Threading.Tasks;
+using Azure.Core.Testing;
+using Azure.Storage.Files.DataLake.Models;
+using Azure.Storage.Test;
+using NUnit.Framework;
+using Metadata = System.Collections.Generic.IDictionary;
+using TestConstants = Azure.Storage.Test.Constants;
+
+namespace Azure.Storage.Files.DataLake.Tests
+{
+ public class DirectoryClientTests : PathTestBase
+ {
        // Runs every test in both sync and async client modes; replace the
        // null with RecordedTestMode.Record to re-record sessions.
        public DirectoryClientTests(bool async)
            : base(async, null /* RecordedTestMode.Record /* to re-record */)
        {
        }
+
        [Test]
        public async Task CreateAsync()
        {
            // Creating a directory should succeed and the client should cache
            // account / file system / directory names without extra requests.
            using (GetNewFileSystem(out FileSystemClient fileSystem))
            {
                // Arrange
                var name = GetNewDirectoryName();
                DirectoryClient directory = InstrumentClient(fileSystem.GetDirectoryClient(name));

                // Act
                Response response = await directory.CreateAsync();

                // Assert
                Assert.IsNotNull(response.GetRawResponse().Headers.RequestId);
                var accountName = new DataLakeUriBuilder(directory.Uri).AccountName;
                TestHelper.AssertCacheableProperty(accountName, () => directory.AccountName);
                var fileSystemName = new DataLakeUriBuilder(directory.Uri).FileSystemName;
                TestHelper.AssertCacheableProperty(fileSystemName, () => directory.FileSystemName);
                TestHelper.AssertCacheableProperty(name, () => directory.Name);
            }
        }
+
        [Test]
        public async Task CreateAsync_Error()
        {
            // Creating a directory in a file system that was never created
            // should fail with FilesystemNotFound.
            // Arrange
            DataLakeServiceClient service = GetServiceClient_SharedKey();
            FileSystemClient fileSystem = InstrumentClient(service.GetFileSystemClient(GetNewFileSystemName()));
            // NOTE(review): 'directory' is unused — the Act below calls
            // fileSystem.CreateDirectoryAsync; confirm whether
            // directory.CreateAsync was intended instead.
            DirectoryClient directory = InstrumentClient(fileSystem.GetDirectoryClient(GetNewDirectoryName()));

            // Act
            await TestHelper.AssertExpectedExceptionAsync(
                fileSystem.CreateDirectoryAsync(GetNewDirectoryName()),
                e => Assert.AreEqual("FilesystemNotFound", e.ErrorCode.Split('\n')[0]));
        }
+
        [Test]
        public async Task CreateAsync_HttpHeaders()
        {
            // HTTP headers supplied at create time should round-trip through
            // GetProperties.
            using (GetNewFileSystem(out FileSystemClient fileSystem))
            {
                // Arrange
                DirectoryClient directory = InstrumentClient(fileSystem.GetDirectoryClient(GetNewDirectoryName()));
                PathHttpHeaders headers = new PathHttpHeaders
                {
                    ContentType = ContentType,
                    ContentEncoding = ContentEncoding,
                    ContentLanguage = ContentLanguage,
                    ContentDisposition = ContentDisposition,
                    CacheControl = CacheControl
                };

                // Act
                await directory.CreateAsync(httpHeaders: headers);

                // Assert
                Response response = await directory.GetPropertiesAsync();
                Assert.AreEqual(ContentType, response.Value.ContentType);
                Assert.AreEqual(1, response.Value.ContentEncoding.Count());
                Assert.AreEqual(ContentEncoding, response.Value.ContentEncoding.First());
                Assert.AreEqual(1, response.Value.ContentLanguage.Count());
                Assert.AreEqual(ContentLanguage, response.Value.ContentLanguage.First());
                Assert.AreEqual(ContentDisposition, response.Value.ContentDisposition);
                Assert.AreEqual(CacheControl, response.Value.CacheControl);
            }
        }
+
        [Test]
        public async Task CreateAsync_Metadata()
        {
            // Metadata supplied at create time should round-trip through
            // GetProperties (directories gain one service-added entry).
            using (GetNewFileSystem(out FileSystemClient fileSystem))
            {
                // Arrange
                IDictionary metadata = BuildMetadata();
                DirectoryClient directory = InstrumentClient(fileSystem.GetDirectoryClient(GetNewDirectoryName()));

                // Act
                await directory.CreateAsync(metadata: metadata);

                // Assert
                Response getPropertiesResponse = await directory.GetPropertiesAsync();
                AssertMetadataEquality(metadata, getPropertiesResponse.Value.Metadata, isDirectory: true);
            }
        }
+
        [Test]
        public async Task CreateAsync_PermissionAndUmask()
        {
            // The umask should mask out bits of the requested permissions:
            // 0777 & ~0057 = 0720, i.e. "rwx-w----".
            using (GetNewFileSystem(out FileSystemClient fileSystem))
            {
                // Arrange
                DirectoryClient directory = InstrumentClient(fileSystem.GetDirectoryClient(GetNewDirectoryName()));
                string permissions = "0777";
                string umask = "0057";

                // Act
                await directory.CreateAsync(
                    permissions: permissions,
                    umask: umask);

                // Assert
                Response response = await directory.GetAccessControlAsync();
                Assert.AreEqual("rwx-w----", response.Value.Permissions);
            }
        }
+
        [Test]
        public async Task CreateAsync_AccessConditions()
        {
            // Create should succeed when every satisfiable access condition
            // (ETag, date, lease) is supplied.
            var garbageLeaseId = GetGarbageLeaseId();
            foreach (AccessConditionParameters parameters in AccessConditions_Data)
            {
                using (GetNewFileSystem(out FileSystemClient fileSystem))
                {
                    // Arrange
                    // This directory is intentionally created twice
                    DirectoryClient directory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());

                    parameters.Match = await SetupPathMatchCondition(directory, parameters.Match);
                    parameters.LeaseId = await SetupPathLeaseCondition(directory, parameters.LeaseId, garbageLeaseId);

                    DataLakeRequestConditions accessConditions = BuildDataLakeRequestAccessConditions(
                        parameters: parameters,
                        lease: true);

                    // Act
                    Response response = await directory.CreateAsync(
                        conditions: accessConditions);

                    // Assert
                    Assert.IsNotNull(response.GetRawResponse().Headers.RequestId);
                }
            }
        }
+
        [Test]
        public async Task CreateAsync_AccessConditionsFail()
        {
            // Create should fail when an unsatisfiable access condition is
            // supplied.
            var garbageLeaseId = GetGarbageLeaseId();
            foreach (AccessConditionParameters parameters in GetAccessConditionsFail_Data(garbageLeaseId))
            {
                using (GetNewFileSystem(out FileSystemClient fileSystem))
                {
                    // Arrange
                    // This directory is intentionally created twice
                    DirectoryClient directory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());
                    parameters.NoneMatch = await SetupPathMatchCondition(directory, parameters.NoneMatch);
                    DataLakeRequestConditions accessConditions = BuildDataLakeRequestAccessConditions(
                        parameters: parameters,
                        lease: true);

                    // Act
                    await TestHelper.AssertExpectedExceptionAsync(
                        directory.CreateAsync(conditions: accessConditions),
                        e => { });
                }
            }
        }
+
        [Test]
        public async Task DeleteAsync()
        {
            // Deleting an existing directory should simply succeed.
            using (GetNewFileSystem(out FileSystemClient fileSystem))
            {
                // Arrange
                var name = GetNewDirectoryName();
                DirectoryClient directory = InstrumentClient(fileSystem.GetDirectoryClient(name));
                await directory.CreateAsync();

                // Act
                Response response = await directory.DeleteAsync();
            }
        }
+
        [Test]
        public async Task DeleteAsync_AccessConditions()
        {
            // Delete should succeed when every satisfiable access condition
            // (ETag, date, lease) is supplied.
            var garbageLeaseId = GetGarbageLeaseId();
            foreach (AccessConditionParameters parameters in AccessConditions_Data)
            {
                using (GetNewFileSystem(out FileSystemClient fileSystem))
                {
                    // Arrange
                    DirectoryClient directory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());

                    parameters.Match = await SetupPathMatchCondition(directory, parameters.Match);
                    parameters.LeaseId = await SetupPathLeaseCondition(directory, parameters.LeaseId, garbageLeaseId);
                    DataLakeRequestConditions accessConditions = BuildDataLakeRequestAccessConditions(
                        parameters: parameters,
                        lease: true);

                    // Act
                    await directory.DeleteAsync(conditions: accessConditions);
                }
            }
        }
+
        [Test]
        public async Task DeleteAsync_AccessConditionsFail()
        {
            // Delete should fail when an unsatisfiable access condition is
            // supplied.
            var garbageLeaseId = GetGarbageLeaseId();
            foreach (AccessConditionParameters parameters in GetAccessConditionsFail_Data(garbageLeaseId))
            {
                using (GetNewFileSystem(out FileSystemClient fileSystem))
                {
                    // Arrange
                    DirectoryClient directory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());

                    parameters.NoneMatch = await SetupPathMatchCondition(directory, parameters.NoneMatch);
                    DataLakeRequestConditions accessConditions = BuildDataLakeRequestAccessConditions(
                        parameters: parameters,
                        lease: true);

                    // Act
                    await TestHelper.AssertExpectedExceptionAsync(
                        directory.DeleteAsync(conditions: accessConditions),
                        e => { });
                }
            }
        }
+
        [Test]
        public async Task RenameAsync()
        {
            // Renaming a directory should yield a client for the destination
            // that can fetch properties (i.e. the destination exists).
            using (GetNewFileSystem(out FileSystemClient fileSystem))
            {
                // Arrange
                DirectoryClient sourceDirectory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());
                string destDirectoryName = GetNewDirectoryName();

                // Act
                DirectoryClient destDirectory = await sourceDirectory.RenameAsync(destinationPath: destDirectoryName);

                // Assert
                Response response = await destDirectory.GetPropertiesAsync();
            }
        }
+
        [Test]
        public async Task RenameAsync_Error()
        {
            // Renaming a directory that was never created should fail with
            // SourcePathNotFound.
            using (GetNewFileSystem(out FileSystemClient fileSystem))
            {
                // Arrange
                var sourceDirectoryName = GetNewDirectoryName();
                DirectoryClient sourceDirectory = InstrumentClient(fileSystem.GetDirectoryClient(sourceDirectoryName));
                string destPath = GetNewDirectoryName();

                // Act
                await TestHelper.AssertExpectedExceptionAsync(
                    sourceDirectory.RenameAsync(destinationPath: destPath),
                    e => Assert.AreEqual("SourcePathNotFound", e.ErrorCode.Split('\n')[0]));
            }
        }
+
        [Test]
        public async Task RenameAsync_DestinationAccessConditions()
        {
            // Rename should succeed when satisfiable access conditions are
            // applied to the destination.
            var garbageLeaseId = GetGarbageLeaseId();
            foreach (AccessConditionParameters parameters in AccessConditions_Data)
            {
                using (GetNewFileSystem(out FileSystemClient fileSystem))
                {
                    // Arrange
                    DirectoryClient sourceDirectory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());
                    DirectoryClient destDirectory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());

                    parameters.Match = await SetupPathMatchCondition(destDirectory, parameters.Match);
                    parameters.LeaseId = await SetupPathLeaseCondition(destDirectory, parameters.LeaseId, garbageLeaseId);

                    DataLakeRequestConditions accessConditions = BuildDataLakeRequestAccessConditions(
                        parameters: parameters,
                        lease: true);

                    // Act
                    destDirectory = await sourceDirectory.RenameAsync(
                        destinationPath: destDirectory.Name,
                        destConditions: accessConditions);

                    // Assert
                    Response response = await destDirectory.GetPropertiesAsync();
                }
            }
        }
+
        [Test]
        public async Task RenameAsync_DestinationAccessConditionsFail()
        {
            // Rename should fail when unsatisfiable access conditions are
            // applied to the destination.
            var garbageLeaseId = GetGarbageLeaseId();
            foreach (AccessConditionParameters parameters in GetAccessConditionsFail_Data(garbageLeaseId))
            {
                using (GetNewFileSystem(out FileSystemClient fileSystem))
                {
                    // Arrange
                    DirectoryClient sourceDirectory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());
                    DirectoryClient destDirectory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());

                    parameters.NoneMatch = await SetupPathMatchCondition(destDirectory, parameters.NoneMatch);
                    DataLakeRequestConditions accessConditions = BuildDataLakeRequestAccessConditions(
                        parameters: parameters,
                        lease: true);

                    // Act
                    await TestHelper.AssertExpectedExceptionAsync(
                        sourceDirectory.RenameAsync(
                            destinationPath: destDirectory.Name,
                            destConditions: accessConditions),
                        e => { });
                }
            }
        }
+
+ [Test]
+ public async Task RenameAsync_SourceAccessConditions()
+ {
+ // Rename should succeed for every satisfiable combination of
+ // SOURCE access conditions (ETag match / active lease).
+ var garbageLeaseId = GetGarbageLeaseId();
+ foreach (AccessConditionParameters parameters in AccessConditions_Data)
+ {
+ using (GetNewFileSystem(out FileSystemClient fileSystem))
+ {
+ // Arrange
+ DirectoryClient sourceDirectory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());
+ DirectoryClient destDirectory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());
+
+ // Conditions are set up against the source directory here, in
+ // contrast to the destination-conditions variant above.
+ parameters.Match = await SetupPathMatchCondition(sourceDirectory, parameters.Match);
+ parameters.LeaseId = await SetupPathLeaseCondition(sourceDirectory, parameters.LeaseId, garbageLeaseId);
+
+ DataLakeRequestConditions accessConditions = BuildDataLakeRequestAccessConditions(
+ parameters: parameters,
+ lease: true);
+
+ // Act
+ destDirectory = await sourceDirectory.RenameAsync(
+ destinationPath: destDirectory.Name,
+ sourceConditions: accessConditions);
+
+ // Assert
+ // Smoke check: the renamed directory is reachable.
+ Response response = await destDirectory.GetPropertiesAsync();
+ }
+ }
+ }
+
+ [Test]
+ public async Task RenameAsync_SourceAccessConditionsFail()
+ {
+ // Unsatisfiable SOURCE conditions (stale ETag / garbage lease) must
+ // make the rename throw.
+ var garbageLeaseId = GetGarbageLeaseId();
+ foreach (AccessConditionParameters parameters in GetAccessConditionsFail_Data(garbageLeaseId))
+ {
+ using (GetNewFileSystem(out FileSystemClient fileSystem))
+ {
+ // Arrange
+ DirectoryClient sourceDirectory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());
+ DirectoryClient destDirectory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());
+
+ parameters.NoneMatch = await SetupPathMatchCondition(sourceDirectory, parameters.NoneMatch);
+ DataLakeRequestConditions accessConditions = BuildDataLakeRequestAccessConditions(
+ parameters: parameters,
+ lease: true);
+
+ // Act
+ // Empty inspection callback: only the throw is asserted.
+ await TestHelper.AssertExpectedExceptionAsync(
+ sourceDirectory.RenameAsync(
+ destinationPath: destDirectory.Name,
+ sourceConditions: accessConditions),
+ e => { });
+ }
+ }
+ }
+
+ [Test]
+ public async Task GetAccessControlAsync()
+ {
+ // Fetching access control on a freshly created directory should
+ // populate every field of the returned PathAccessControl.
+ using (GetNewDirectory(out DirectoryClient directory))
+ {
+ // Act
+ PathAccessControl result = await directory.GetAccessControlAsync();
+
+ // Assert
+ Assert.IsNotNull(result.Owner);
+ Assert.IsNotNull(result.Group);
+ Assert.IsNotNull(result.Permissions);
+ Assert.IsNotNull(result.Acl);
+ }
+ }
+
+ [Test]
+ public async Task GetAccessControlAsync_Oauth()
+ {
+ // Access control should be readable through an OAuth-authenticated client.
+ DataLakeServiceClient oauthService = GetServiceClient_OAuth();
+ string fileSystemName = GetNewFileSystemName();
+ string directoryName = GetNewDirectoryName();
+ using (GetNewFileSystem(out FileSystemClient fileSystem, fileSystemName: fileSystemName, service: oauthService))
+ {
+ // Arrange
+ DirectoryClient directory = await fileSystem.CreateDirectoryAsync(directoryName);
+ // Fix: wrap in InstrumentClient, matching every other client built in
+ // these tests, so the recorded-test infrastructure can intercept calls.
+ DirectoryClient oauthDirectory = InstrumentClient(
+ oauthService
+ .GetFileSystemClient(fileSystemName)
+ .GetDirectoryClient(directoryName));
+
+ // Act
+ PathAccessControl accessControl = await oauthDirectory.GetAccessControlAsync();
+
+ // Assert
+ Assert.IsNotNull(accessControl.Owner);
+ Assert.IsNotNull(accessControl.Group);
+ Assert.IsNotNull(accessControl.Permissions);
+ Assert.IsNotNull(accessControl.Acl);
+ }
+ }
+
+
+ [Test]
+ public async Task GetAccessControlAsync_FileSystemSAS()
+ {
+ // Access control should be readable through a file-system scoped service SAS.
+ string fileSystemName = GetNewFileSystemName();
+ string directoryName = GetNewDirectoryName();
+ using (GetNewFileSystem(out FileSystemClient fileSystem, fileSystemName: fileSystemName))
+ {
+ // Arrange
+ DirectoryClient directory = await fileSystem.CreateDirectoryAsync(directoryName);
+
+ var sasService = GetServiceClient_DataLakeServiceSas_FileSystem(
+ fileSystemName: fileSystemName);
+ DirectoryClient sasDirectory = InstrumentClient(
+ sasService
+ .GetFileSystemClient(fileSystemName)
+ .GetDirectoryClient(directoryName));
+
+ // Act
+ PathAccessControl result = await sasDirectory.GetAccessControlAsync();
+
+ // Assert
+ Assert.IsNotNull(result.Owner);
+ Assert.IsNotNull(result.Group);
+ Assert.IsNotNull(result.Permissions);
+ Assert.IsNotNull(result.Acl);
+ }
+ }
+
+ [Test]
+ public async Task GetAccessControlAsync_FileSystemIdentitySAS()
+ {
+ // Access control should be readable through a file-system scoped
+ // identity (user-delegation) SAS.
+ DataLakeServiceClient oauthService = GetServiceClient_OAuth();
+ string fileSystemName = GetNewFileSystemName();
+ string directoryName = GetNewDirectoryName();
+ using (GetNewFileSystem(out FileSystemClient fileSystem, fileSystemName: fileSystemName, service: oauthService))
+ {
+ // Arrange
+ DirectoryClient directory = await fileSystem.CreateDirectoryAsync(directoryName);
+
+ Response delegationKey = await oauthService.GetUserDelegationKeyAsync(
+ start: null,
+ expiry: Recording.UtcNow.AddHours(1));
+
+ var sasService = GetServiceClient_DataLakeServiceIdentitySas_FileSystem(
+ fileSystemName: fileSystemName,
+ userDelegationKey: delegationKey);
+ DirectoryClient sasDirectory = InstrumentClient(
+ sasService
+ .GetFileSystemClient(fileSystemName)
+ .GetDirectoryClient(directoryName));
+
+ // Act
+ PathAccessControl result = await sasDirectory.GetAccessControlAsync();
+
+ // Assert
+ Assert.IsNotNull(result.Owner);
+ Assert.IsNotNull(result.Group);
+ Assert.IsNotNull(result.Permissions);
+ Assert.IsNotNull(result.Acl);
+ }
+ }
+
+ [Test]
+ public async Task GetAccessControlAsync_PathSAS()
+ {
+ // Access control should be readable through a path-scoped service SAS.
+ string fileSystemName = GetNewFileSystemName();
+ string directoryName = GetNewDirectoryName();
+ using (GetNewFileSystem(out FileSystemClient fileSystem, fileSystemName: fileSystemName))
+ {
+ // Arrange
+ DirectoryClient directory = await fileSystem.CreateDirectoryAsync(directoryName);
+
+ var sasService = GetServiceClient_DataLakeServiceSas_Path(
+ fileSystemName: fileSystemName,
+ path: directoryName);
+ DirectoryClient sasDirectory = InstrumentClient(
+ sasService
+ .GetFileSystemClient(fileSystemName)
+ .GetDirectoryClient(directoryName));
+
+ // Act
+ PathAccessControl result = await sasDirectory.GetAccessControlAsync();
+
+ // Assert
+ Assert.IsNotNull(result.Owner);
+ Assert.IsNotNull(result.Group);
+ Assert.IsNotNull(result.Permissions);
+ Assert.IsNotNull(result.Acl);
+ }
+ }
+
+ [Test]
+ public async Task GetAccessControlAsync_PathIdentitySAS()
+ {
+ // Access control should be readable through a path-scoped identity
+ // (user-delegation) SAS.
+ DataLakeServiceClient oauthService = GetServiceClient_OAuth();
+ string fileSystemName = GetNewFileSystemName();
+ string directoryName = GetNewDirectoryName();
+ using (GetNewFileSystem(out FileSystemClient fileSystem, fileSystemName: fileSystemName, service: oauthService))
+ {
+ // Arrange
+ DirectoryClient directory = await fileSystem.CreateDirectoryAsync(directoryName);
+
+ Response delegationKey = await oauthService.GetUserDelegationKeyAsync(
+ start: null,
+ expiry: Recording.UtcNow.AddHours(1));
+
+ var sasService = GetServiceClient_DataLakeServiceIdentitySas_Path(
+ fileSystemName: fileSystemName,
+ path: directoryName,
+ userDelegationKey: delegationKey);
+ DirectoryClient sasDirectory = InstrumentClient(
+ sasService
+ .GetFileSystemClient(fileSystemName)
+ .GetDirectoryClient(directoryName));
+
+ // Act
+ PathAccessControl result = await sasDirectory.GetAccessControlAsync();
+
+ // Assert
+ Assert.IsNotNull(result.Owner);
+ Assert.IsNotNull(result.Group);
+ Assert.IsNotNull(result.Permissions);
+ Assert.IsNotNull(result.Acl);
+ }
+ }
+
+ [Test]
+ public async Task GetAccessControlAsync_Error()
+ {
+ // Getting access control for a directory that was never created must fail.
+ using (GetNewFileSystem(out FileSystemClient fileSystemClient))
+ {
+ // Arrange
+ // Client for a path that does not exist on the service.
+ DirectoryClient directory = InstrumentClient(fileSystemClient.GetDirectoryClient(GetNewDirectoryName()));
+
+ // Act
+ // NOTE(review): sibling tests assert symbolic error codes (e.g.
+ // "SourcePathNotFound"); confirm the service really reports the raw
+ // status "404" as ErrorCode for this operation rather than a named code.
+ await TestHelper.AssertExpectedExceptionAsync(
+ directory.GetAccessControlAsync(),
+ e => Assert.AreEqual("404", e.ErrorCode));
+ }
+ }
+
+ [Test]
+ public async Task GetAccessControlAsync_AccessConditions()
+ {
+ // GetAccessControlAsync should succeed for every satisfiable
+ // combination of ETag-match and lease conditions.
+ var garbageLeaseId = GetGarbageLeaseId();
+ foreach (AccessConditionParameters parameters in AccessConditions_Data)
+ {
+ using (GetNewFileSystem(out FileSystemClient fileSystem))
+ {
+ // Arrange
+ DirectoryClient directory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());
+
+ parameters.Match = await SetupPathMatchCondition(directory, parameters.Match);
+ parameters.LeaseId = await SetupPathLeaseCondition(directory, parameters.LeaseId, garbageLeaseId);
+ DataLakeRequestConditions accessConditions = BuildDataLakeRequestAccessConditions(
+ parameters: parameters,
+ lease: true);
+
+ // Act
+ // No explicit Assert: the call throwing would fail the test.
+ await directory.GetAccessControlAsync(conditions: accessConditions);
+ }
+ }
+ }
+
+ [Ignore("service bug")]
+ [Test]
+ public async Task GetAccessControlAsync_AccessConditionsFail()
+ {
+ // Unsatisfiable conditions must make GetAccessControlAsync throw.
+ // Currently disabled via [Ignore] pending a service-side fix.
+ var garbageLeaseId = GetGarbageLeaseId();
+ foreach (AccessConditionParameters parameters in GetAccessConditionsFail_Data(garbageLeaseId))
+ {
+ using (GetNewFileSystem(out FileSystemClient fileSystem))
+ {
+ // Arrange
+ DirectoryClient directory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());
+
+ parameters.NoneMatch = await SetupPathMatchCondition(directory, parameters.NoneMatch);
+ // NOTE(review): unlike the success variant above, this builds the
+ // conditions without lease: true, so parameter sets whose failing
+ // condition is the garbage lease may not apply it — confirm intent.
+ DataLakeRequestConditions accessConditions = BuildDataLakeRequestAccessConditions(parameters);
+
+ // Act
+ await TestHelper.AssertExpectedExceptionAsync(
+ directory.GetAccessControlAsync(conditions: accessConditions),
+ e => { });
+ }
+ }
+ }
+
+ [Test]
+ public async Task SetAccessControlAsync()
+ {
+ // Setting a valid octal permission string should succeed and return
+ // valid path info.
+ using (GetNewDirectory(out DirectoryClient directory))
+ {
+ // Arrange
+ var permissions = new PathAccessControl
+ {
+ Permissions = "0777"
+ };
+
+ // Act
+ Response response = await directory.SetAccessControlAsync(permissions);
+
+ // Assert
+ AssertValidStoragePathInfo(response);
+ }
+ }
+
+ [Test]
+ public async Task SetAccessControlAsync_Error()
+ {
+ // A non-octal permission string must be rejected by the service with
+ // the InvalidPermission error code.
+ using (GetNewDirectory(out DirectoryClient directory))
+ {
+ // Arrange
+ var badPermissions = new PathAccessControl
+ {
+ Permissions = "asdf"
+ };
+
+ // Act
+ await TestHelper.AssertExpectedExceptionAsync(
+ directory.SetAccessControlAsync(badPermissions),
+ e =>
+ {
+ Assert.AreEqual("InvalidPermission", e.ErrorCode);
+ Assert.AreEqual("The permission value is invalid.", e.Message.Split('\n')[0]);
+ });
+ }
+ }
+
+ [Test]
+ public async Task SetAccessControlAsync_AccessConditions()
+ {
+ // SetAccessControlAsync should succeed for every satisfiable
+ // combination of ETag-match and lease conditions.
+ var garbageLeaseId = GetGarbageLeaseId();
+ foreach (AccessConditionParameters parameters in AccessConditions_Data)
+ {
+ using (GetNewFileSystem(out FileSystemClient fileSystem))
+ {
+ // Arrange
+ DirectoryClient directory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());
+
+ parameters.Match = await SetupPathMatchCondition(directory, parameters.Match);
+ parameters.LeaseId = await SetupPathLeaseCondition(directory, parameters.LeaseId, garbageLeaseId);
+ DataLakeRequestConditions accessConditions = BuildDataLakeRequestAccessConditions(
+ parameters: parameters,
+ lease: true);
+
+ // Act
+ Response response = await directory.SetAccessControlAsync(
+ accessControl: new PathAccessControl()
+ {
+ Permissions = "0777"
+ },
+ conditions: accessConditions);
+
+ // Assert
+ Assert.IsNotNull(response.GetRawResponse().Headers.RequestId);
+ }
+ }
+ }
+
+ [Test]
+ public async Task SetAccessControlAsync_AccessConditionsFail()
+ {
+ // Unsatisfiable conditions must make SetAccessControlAsync throw.
+ var garbageLeaseId = GetGarbageLeaseId();
+ foreach (AccessConditionParameters parameters in GetAccessConditionsFail_Data(garbageLeaseId))
+ {
+ using (GetNewFileSystem(out FileSystemClient fileSystem))
+ {
+ // Arrange
+ DirectoryClient directory = await fileSystem.CreateDirectoryAsync(GetNewDirectoryName());
+
+ parameters.NoneMatch = await SetupPathMatchCondition(directory, parameters.NoneMatch);
+ // NOTE(review): built without lease: true, unlike the success
+ // variant — confirm the garbage-lease parameter sets still apply
+ // a failing condition.
+ DataLakeRequestConditions accessConditions = BuildDataLakeRequestAccessConditions(parameters);
+
+ // Act
+ await TestHelper.AssertExpectedExceptionAsync(
+ directory.SetAccessControlAsync(
+ accessControl: new PathAccessControl()
+ {
+ Permissions = "0777"
+ },
+ conditions: accessConditions),
+ e => { });
+ }
+ }
+ }
+
+ [Test]
+ public async Task GetPropertiesAsync()
+ {
+ // Basic smoke test: properties can be fetched from a new directory.
+ using (GetNewDirectory(out DirectoryClient directoryClient))
+ {
+ // Act
+ Response result = await directoryClient.GetPropertiesAsync();
+
+ // Assert
+ Assert.IsNotNull(result.GetRawResponse().Headers.RequestId);
+ }
+ }
+
+ [Test]
+ public async Task GetPropertiesAsync_Oauth()
+ {
+ // Properties should be readable through an OAuth-authenticated client.
+ DataLakeServiceClient oauthService = GetServiceClient_OAuth();
+ string fileSystemName = GetNewFileSystemName();
+ string directoryName = GetNewDirectoryName();
+ using (GetNewFileSystem(out FileSystemClient fileSystem, fileSystemName: fileSystemName, service: oauthService))
+ {
+ // Arrange
+ DirectoryClient directory = await fileSystem.CreateDirectoryAsync(directoryName);
+ // Fix: instrument the OAuth client like every other client in these tests.
+ DirectoryClient oauthDirectory = InstrumentClient(
+ oauthService
+ .GetFileSystemClient(fileSystemName)
+ .GetDirectoryClient(directoryName));
+
+ // Act
+ // Fix: call through the OAuth-authenticated client; the original
+ // called `directory`, so the OAuth code path was never exercised.
+ Response response = await oauthDirectory.GetPropertiesAsync();
+
+ // Assert
+ Assert.IsNotNull(response.GetRawResponse().Headers.RequestId);
+ }
+ }
+
+ [Test]
+ public async Task GetPropertiesAsync_FileSystemSAS()
+ {
+ // Properties should be readable through a file-system scoped service SAS;
+ // also verifies the cached account/file-system/name properties.
+ string fileSystemName = GetNewFileSystemName();
+ string directoryName = GetNewDirectoryName();
+ using (GetNewFileSystem(out FileSystemClient fileSystem, fileSystemName: fileSystemName))
+ {
+ // Arrange
+ DirectoryClient directory = await fileSystem.CreateDirectoryAsync(directoryName);
+
+ DirectoryClient sasDirectory = InstrumentClient(
+ GetServiceClient_DataLakeServiceSas_FileSystem(
+ fileSystemName: fileSystemName)
+ .GetFileSystemClient(fileSystemName)
+ .GetDirectoryClient(directoryName));
+
+ // Act
+ Response response = await sasDirectory.GetPropertiesAsync();
+
+ // Assert
+ Assert.IsNotNull(response.GetRawResponse().Headers.RequestId);
+ var accountName = new DataLakeUriBuilder(fileSystem.Uri).AccountName;
+ // NOTE(review): these cacheable-property checks run against `directory`,
+ // not the SAS client under test — confirm whether `sasDirectory` was intended.
+ TestHelper.AssertCacheableProperty(accountName, () => directory.AccountName);
+ TestHelper.AssertCacheableProperty(fileSystemName, () => directory.FileSystemName);
+ TestHelper.AssertCacheableProperty(directoryName, () => directory.Name);
+ }
+ }
+
+ [Test]
+ public async Task GetPropertiesAsync_FileSystemIdentitySAS()
+ {
+ // Properties should be readable through a file-system scoped identity
+ // (user-delegation) SAS.
+ DataLakeServiceClient oauthService = GetServiceClient_OAuth();
+ string fileSystemName = GetNewFileSystemName();
+ string directoryName = GetNewDirectoryName();
+ using (GetNewFileSystem(out FileSystemClient fileSystem, fileSystemName: fileSystemName, service: oauthService))
+ {
+ // Arrange
+ DirectoryClient directory = await fileSystem.CreateDirectoryAsync(directoryName);
+
+ Response delegationKey = await oauthService.GetUserDelegationKeyAsync(
+ start: null,
+ expiry: Recording.UtcNow.AddHours(1));
+
+ var sasService = GetServiceClient_DataLakeServiceIdentitySas_FileSystem(
+ fileSystemName: fileSystemName,
+ userDelegationKey: delegationKey);
+ DirectoryClient sasDirectory = InstrumentClient(
+ sasService
+ .GetFileSystemClient(fileSystemName)
+ .GetDirectoryClient(directoryName));
+
+ // Act
+ Response result = await sasDirectory.GetPropertiesAsync();
+
+ // Assert
+ Assert.IsNotNull(result.GetRawResponse().Headers.RequestId);
+ }
+ }
+
+ [Test]
+ public async Task GetPropertiesAsync_PathSAS()
+ {
+ // Properties should be readable through a path-scoped service SAS.
+ string fileSystemName = GetNewFileSystemName();
+ string directoryName = GetNewDirectoryName();
+ using (GetNewFileSystem(out FileSystemClient fileSystem, fileSystemName: fileSystemName))
+ {
+ // Arrange
+ DirectoryClient directory = await fileSystem.CreateDirectoryAsync(directoryName);
+
+ var sasService = GetServiceClient_DataLakeServiceSas_Path(
+ fileSystemName: fileSystemName,
+ path: directoryName);
+ DirectoryClient sasDirectory = InstrumentClient(
+ sasService
+ .GetFileSystemClient(fileSystemName)
+ .GetDirectoryClient(directoryName));
+
+ // Act
+ Response result = await sasDirectory.GetPropertiesAsync();
+
+ // Assert
+ Assert.IsNotNull(result.GetRawResponse().Headers.RequestId);
+ }
+ }
+
+ [Test]
+ public async Task GetPropertiesAsync_PathIdentitySAS()
+ {
+ DataLakeServiceClient oauthService = GetServiceClient_OAuth();
+ string fileSystemName = GetNewFileSystemName();
+ string directoryName = GetNewDirectoryName();
+ using (GetNewFileSystem(out FileSystemClient fileSystem, fileSystemName: fileSystemName, service: oauthService))
+ {
+ // Arrange
+ DirectoryClient directory = await fileSystem.CreateDirectoryAsync(directoryName);
+
+ Response userDelegationKey = await oauthService.GetUserDelegationKeyAsync(
+ start: null,
+ expiry: Recording.UtcNow.AddHours(1));
+
+ DirectoryClient identitySasDirectory = InstrumentClient(
+ GetServiceClient_DataLakeServiceIdentitySas_Path(
+ fileSystemName: fileSystemName,
+ path: directoryName,
+ userDelegationKey: userDelegationKey)
+ .GetFileSystemClient(fileSystemName)
+ .GetDirectoryClient(directoryName));
+
+ // Act
+ Response