diff --git a/src/storage-preview/HISTORY.rst b/src/storage-preview/HISTORY.rst
index 14b9b8cbb18..61f37386135 100644
--- a/src/storage-preview/HISTORY.rst
+++ b/src/storage-preview/HISTORY.rst
@@ -2,6 +2,10 @@
Release History
===============
+0.7.3(2021-05-20)
+++++++++++++++++++
+* Support soft delete for ADLS Gen2 account
+
0.7.2(2021-04-09)
++++++++++++++++++
* Remove `az storage blob service-properties` as it is supported in storage-blob-preview extension and Azure CLI
diff --git a/src/storage-preview/README.md b/src/storage-preview/README.md
index 45275956394..99a7d21ecc9 100644
--- a/src/storage-preview/README.md
+++ b/src/storage-preview/README.md
@@ -306,4 +306,69 @@ az storage account file-service-properties update \
-g MyResourceGroup
```
+#### Soft Delete for ADLS Gen2 storage
+##### Prepare resources
+1. Create an ADLS Gen2 storage account with hierarchical namespace enabled
+```
+az storage account create \
+ -n myadls \
+ -g myresourcegroup \
+ --hns
+```
+To get the connection string, you can use the following command:
+```
+az storage account show-connection-string \
+ -n myadls \
+ -g myresourcegroup
+```
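+The remaining examples pass the connection string through a `$con` shell variable. A minimal sketch to capture it, assuming a bash-like shell (the variable name is only an example):
+```
+con=$(az storage account show-connection-string \
+    -n myadls \
+    -g myresourcegroup \
+    --query connectionString \
+    -o tsv)
+```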
+2. Create a file system in the ADLS Gen2 storage account
+```
+az storage fs create \
+ -n myfilesystem \
+    --connection-string $con
+```
+##### Enable delete retention
+```
+az storage fs service-properties update \
+ --delete-retention \
+ --delete-retention-period 5 \
+    --connection-string $con
+```
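+To verify the setting, you can show the current service properties (the `show` subcommand is part of this extension):
+```
+az storage fs service-properties show \
+    --connection-string $con
+```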
+##### Upload a file to the file system
+```
+az storage fs file upload \
+ -s ".\test.txt" \
+ -p test \
+    -f myfilesystem \
+ --connection-string $con
+```
+##### Delete the file
+```
+az storage fs file delete \
+ -p test \
+    -f myfilesystem \
+ --connection-string $con
+```
+##### List deleted paths
+```
+az storage fs list-deleted-path \
+    -f myfilesystem \
+ --connection-string $con
+```
+##### Undelete deleted path
+```
+az storage fs undelete-path \
+    -f myfilesystem \
+    --deleted-path-name test \
+    --deletion-id 132549163 \
+ --connection-string $con
+```
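+To confirm the path was restored, you could show the file again (assuming the core CLI's `az storage fs file show` command is available):
+```
+az storage fs file show \
+    -p test \
+    -f myfilesystem \
+    --connection-string $con
+```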
+##### Disable delete retention
+```
+az storage fs service-properties update \
+ --delete-retention false \
+ --connection-string $con
+```
+
If you have issues, please give feedback by opening an issue at https://github.com/Azure/azure-cli-extensions/issues.
\ No newline at end of file
diff --git a/src/storage-preview/azext_storage_preview/__init__.py b/src/storage-preview/azext_storage_preview/__init__.py
index 4bf7c3b14ac..9a732e9a2c5 100644
--- a/src/storage-preview/azext_storage_preview/__init__.py
+++ b/src/storage-preview/azext_storage_preview/__init__.py
@@ -8,8 +8,8 @@
from azure.cli.core.commands import AzCommandGroup, AzArgumentContext
import azext_storage_preview._help # pylint: disable=unused-import
-from .profiles import (CUSTOM_DATA_STORAGE, CUSTOM_MGMT_PREVIEW_STORAGE, CUSTOM_DATA_STORAGE_ADLS,
- CUSTOM_DATA_STORAGE_FILESHARE)
+from .profiles import (CUSTOM_DATA_STORAGE, CUSTOM_MGMT_PREVIEW_STORAGE, CUSTOM_DATA_STORAGE_ADLS,
+                       CUSTOM_DATA_STORAGE_FILESHARE, CUSTOM_DATA_STORAGE_FILEDATALAKE)
class StorageCommandsLoader(AzCommandsLoader):
@@ -20,6 +20,8 @@ def __init__(self, cli_ctx=None):
register_resource_type('latest', CUSTOM_DATA_STORAGE_ADLS, '2019-02-02-preview')
register_resource_type('latest', CUSTOM_MGMT_PREVIEW_STORAGE, '2020-08-01-preview')
register_resource_type('latest', CUSTOM_DATA_STORAGE_FILESHARE, '2020-02-10')
+ register_resource_type('latest', CUSTOM_DATA_STORAGE_FILEDATALAKE, '2020-06-12')
+
storage_custom = CliCommandType(operations_tmpl='azext_storage_preview.custom#{}')
super(StorageCommandsLoader, self).__init__(cli_ctx=cli_ctx,
@@ -63,8 +65,9 @@ def register_content_settings_argument(self, settings_class, update, arg_group=N
self.ignore('content_settings')
- # The parameter process_md5 is used to determine whether it is compatible with the process_md5 parameter type of Python SDK
- # When the Python SDK is fixed (Issue: https://github.com/Azure/azure-sdk-for-python/issues/15919),
+ # The parameter process_md5 is used to determine whether it is compatible with the process_md5 parameter
+        # type of the Python SDK. When the Python SDK is fixed
+ # (Issue: https://github.com/Azure/azure-sdk-for-python/issues/15919),
# this parameter should not be passed in any more
self.extra('content_type', default=None, help='The content MIME type.', arg_group=arg_group,
validator=get_content_setting_validator(settings_class, update, guess_from_file=guess_from_file,
diff --git a/src/storage-preview/azext_storage_preview/_client_factory.py b/src/storage-preview/azext_storage_preview/_client_factory.py
index 98b2c0a3384..fca61a5808c 100644
--- a/src/storage-preview/azext_storage_preview/_client_factory.py
+++ b/src/storage-preview/azext_storage_preview/_client_factory.py
@@ -8,7 +8,9 @@
from knack.util import CLIError
from knack.log import get_logger
-from .profiles import CUSTOM_DATA_STORAGE, CUSTOM_MGMT_PREVIEW_STORAGE, CUSTOM_DATA_STORAGE_FILESHARE
+
+from .profiles import (CUSTOM_DATA_STORAGE, CUSTOM_MGMT_PREVIEW_STORAGE, CUSTOM_DATA_STORAGE_FILESHARE,
+                       CUSTOM_DATA_STORAGE_FILEDATALAKE)
MISSING_CREDENTIALS_ERROR_MESSAGE = """
Missing credentials to access storage service. The following variations are accepted:
@@ -136,7 +138,6 @@ def cf_mgmt_file_services(cli_ctx, _):
def get_account_url(cli_ctx, account_name, service):
- from knack.util import CLIError
if account_name is None:
raise CLIError("Please provide storage account name or connection string.")
storage_endpoint = cli_ctx.cloud.suffixes.storage_endpoint
@@ -172,3 +173,29 @@ def cf_share_directory_client(cli_ctx, kwargs):
def cf_share_file_client(cli_ctx, kwargs):
return cf_share_client(cli_ctx, kwargs).get_file_client(file_path=kwargs.pop('file_path'))
+
+
+def cf_adls_service(cli_ctx, kwargs):
+ client_kwargs = {}
+ t_adls_service = get_sdk(cli_ctx, CUSTOM_DATA_STORAGE_FILEDATALAKE,
+ '_data_lake_service_client#DataLakeServiceClient')
+ connection_string = kwargs.pop('connection_string', None)
+ account_name = kwargs.pop('account_name', None)
+ account_key = kwargs.pop('account_key', None)
+ token_credential = kwargs.pop('token_credential', None)
+ sas_token = kwargs.pop('sas_token', None)
+    # Enable NetworkTraceLoggingPolicy, which logs all headers (except Authorization) without redaction
+ client_kwargs['logging_enable'] = True
+ if connection_string:
+ return t_adls_service.from_connection_string(conn_str=connection_string, **client_kwargs)
+
+ account_url = get_account_url(cli_ctx, account_name=account_name, service='dfs')
+ credential = account_key or sas_token or token_credential
+
+ if account_url and credential:
+ return t_adls_service(account_url=account_url, credential=credential, **client_kwargs)
+ return None
+
+
+def cf_adls_file_system(cli_ctx, kwargs):
+ return cf_adls_service(cli_ctx, kwargs).get_file_system_client(file_system=kwargs.pop('file_system_name'))
diff --git a/src/storage-preview/azext_storage_preview/_help.py b/src/storage-preview/azext_storage_preview/_help.py
index 1fbca22ffeb..86ae1be7195 100644
--- a/src/storage-preview/azext_storage_preview/_help.py
+++ b/src/storage-preview/azext_storage_preview/_help.py
@@ -394,3 +394,45 @@
- name: Upload a set of files in a local directory to a storage blob directory.
text: az storage blob directory upload -c MyContainer --account-name MyStorageAccount -s "path/to/file*" -d directory --recursive
"""
+
+helps['storage fs list-deleted-path'] = """
+type: command
+short-summary: List the deleted (file or directory) paths under the specified file system.
+examples:
+  - name: List the deleted (file or directory) paths under the specified file system.
+ text: |
+        az storage fs list-deleted-path -f myfilesystem --account-name mystorageaccount --account-key 00000000
+"""
+
+helps['storage fs service-properties'] = """
+type: group
+short-summary: Manage storage datalake service properties.
+"""
+
+helps['storage fs service-properties show'] = """
+type: command
+short-summary: Show the properties of a storage account's datalake service, including Azure Storage Analytics.
+examples:
+ - name: Show the properties of a storage account's datalake service
+ text: |
+        az storage fs service-properties show --account-name mystorageaccount --account-key 00000000
+"""
+
+helps['storage fs service-properties update'] = """
+type: command
+short-summary: Update the properties of a storage account's datalake service, including Azure Storage Analytics.
+examples:
+ - name: Update the properties of a storage account's datalake service
+ text: |
+        az storage fs service-properties update --delete-retention --delete-retention-period 7 --account-name mystorageaccount --account-key 00000000
+"""
+
+helps['storage fs undelete-path'] = """
+type: command
+short-summary: Restore soft-deleted path.
+long-summary: The operation will only succeed if it is used within the number of days specified in the delete retention policy.
+examples:
+ - name: Restore soft-deleted path.
+ text: |
+        az storage fs undelete-path -f myfilesystem --deleted-path-name dir --deletion-id 0000 --account-name mystorageaccount --account-key 00000000
+"""
diff --git a/src/storage-preview/azext_storage_preview/_params.py b/src/storage-preview/azext_storage_preview/_params.py
index 652844d7b9c..ed1f98ba5cc 100644
--- a/src/storage-preview/azext_storage_preview/_params.py
+++ b/src/storage-preview/azext_storage_preview/_params.py
@@ -13,7 +13,7 @@
validate_storage_data_plane_list,
process_resource_group, add_upload_progress_callback)
-from .profiles import CUSTOM_MGMT_PREVIEW_STORAGE
+from .profiles import CUSTOM_MGMT_PREVIEW_STORAGE, CUSTOM_DATA_STORAGE_FILEDATALAKE
def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statements
@@ -54,6 +54,9 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem
'e.g."user::rwx,user:john.doe@contoso:rwx,group::r--,other::---,mask::rwx".')
progress_type = CLIArgumentType(help='Include this flag to disable progress reporting for the command.',
action='store_true', validator=add_upload_progress_callback)
+ timeout_type = CLIArgumentType(
+ help='Request timeout in seconds. Applies to each call to the service.', type=int
+ )
with self.argument_context('storage') as c:
c.argument('container_name', container_name_type)
@@ -157,15 +160,15 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem
with self.argument_context('storage blob service-properties update') as c:
c.argument('delete_retention', arg_type=get_three_state_flag(), arg_group='Soft Delete',
- help='Enables soft-delete.')
+ help='Enable soft-delete.')
c.argument('days_retained', type=int, arg_group='Soft Delete',
help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].')
c.argument('static_website', arg_group='Static Website', arg_type=get_three_state_flag(),
- help='Enables static-website.')
+ help='Enable static-website.')
c.argument('index_document', help='Represents the name of the index document. This is commonly "index.html".',
arg_group='Static Website')
c.argument('error_document_404_path', options_list=['--404-document'], arg_group='Static Website',
- help='Represents the path to the error document that should be shown when an error 404 is issued,'
+                   help='The path to the error document that should be shown when an error 404 is issued,'
' in other words, when a browser requests a page that does not exist.')
with self.argument_context('storage azcopy blob upload') as c:
@@ -374,3 +377,34 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem
c.register_content_settings_argument(t_file_content_settings, update=False, arg_group='Content Settings',
process_md5=True)
c.extra('no_progress', progress_type)
+
+ with self.argument_context('storage fs service-properties update', resource_type=CUSTOM_DATA_STORAGE_FILEDATALAKE,
+ min_api='2020-06-12') as c:
+ c.argument('delete_retention', arg_type=get_three_state_flag(), arg_group='Soft Delete',
+ help='Enable soft-delete.')
+ c.argument('delete_retention_period', type=int, arg_group='Soft Delete',
+ options_list=['--delete-retention-period', '--period'],
+                   help='Number of days that the soft-deleted file system will be retained. Must be in range [1,365].')
+ c.argument('enable_static_website', options_list=['--static-website'], arg_group='Static Website',
+ arg_type=get_three_state_flag(),
+ help='Enable static-website.')
+        c.argument('index_document', help='The name of the index document. This is commonly "index.html".',
+ arg_group='Static Website')
+ c.argument('error_document_404_path', options_list=['--404-document'], arg_group='Static Website',
+                   help='The path to the error document that should be shown when an error 404 is issued,'
+ ' in other words, when a browser requests a page that does not exist.')
+
+ for item in ['list-deleted-path', 'undelete-path']:
+ with self.argument_context('storage fs {}'.format(item)) as c:
+ c.extra('file_system_name', options_list=['--file-system', '-f'],
+ help="File system name.", required=True)
+ c.extra('timeout', timeout_type)
+
+ with self.argument_context('storage fs list-deleted-path') as c:
+ c.argument('path_prefix', help='Filter the results to return only paths under the specified path.')
+        c.argument('num_results', type=int, help='Specify the maximum number of results to return.')
+        c.argument('marker', help='A string value that identifies the portion of the list of paths to be '
+ 'returned with the next listing operation. The operation returns the NextMarker value within '
+                   'the response body if the listing operation did not return all paths remaining to be listed '
+ 'with the current page. If specified, this generator will begin returning results from the point '
+ 'where the previous generator stopped.')
diff --git a/src/storage-preview/azext_storage_preview/_transformers.py b/src/storage-preview/azext_storage_preview/_transformers.py
index 6701d696a39..daeb724b61e 100644
--- a/src/storage-preview/azext_storage_preview/_transformers.py
+++ b/src/storage-preview/azext_storage_preview/_transformers.py
@@ -122,5 +122,6 @@ def transform_storage_list_output(result):
return list(result)
+# pylint: disable=unused-argument
def transform_file_upload(result):
return None
diff --git a/src/storage-preview/azext_storage_preview/_validators.py b/src/storage-preview/azext_storage_preview/_validators.py
index b42ac0bfb83..440597a2428 100644
--- a/src/storage-preview/azext_storage_preview/_validators.py
+++ b/src/storage-preview/azext_storage_preview/_validators.py
@@ -76,59 +76,85 @@ def validate_bypass(namespace):
namespace.bypass = ', '.join(namespace.bypass) if isinstance(namespace.bypass, list) else namespace.bypass
+def get_config_value(cmd, section, key, default):
+ return cmd.cli_ctx.config.get(section, key, default)
+
+
+def is_storagev2(import_prefix):
+ return import_prefix.startswith('azure.multiapi.storagev2.') or 'datalake' in import_prefix
+
+
def validate_client_parameters(cmd, namespace):
""" Retrieves storage connection parameters from environment variables and parses out connection string into
account name and key """
n = namespace
- def get_config_value(section, key, default):
- return cmd.cli_ctx.config.get(section, key, default)
-
if hasattr(n, 'auth_mode'):
- auth_mode = n.auth_mode or get_config_value('storage', 'auth_mode', None)
+ auth_mode = n.auth_mode or get_config_value(cmd, 'storage', 'auth_mode', None)
del n.auth_mode
if not n.account_name:
- n.account_name = get_config_value('storage', 'account', None)
+ n.account_name = get_config_value(cmd, 'storage', 'account', None)
if auth_mode == 'login':
- n.token_credential = _create_token_credential(cmd.cli_ctx)
-
- # give warning if there are account key args being ignored
- account_key_args = [n.account_key and "--account-key", n.sas_token and "--sas-token",
- n.connection_string and "--connection-string"]
- account_key_args = [arg for arg in account_key_args if arg]
-
- if account_key_args:
- logger.warning('In "login" auth mode, the following arguments are ignored: %s',
- ' ,'.join(account_key_args))
- return
+ prefix = cmd.command_kwargs['resource_type'].import_prefix
+            # is_storagev2() is used to determine whether the command is using the track2 SDK
+            # If so, we will use get_login_credentials() as the token credential
+ if is_storagev2(prefix):
+ from azure.cli.core._profile import Profile
+ profile = Profile(cli_ctx=cmd.cli_ctx)
+ n.token_credential, _, _ = profile.get_login_credentials(subscription_id=n._subscription)
+            # Otherwise, we will assume it is in track1 and keep the previous token updater
+ else:
+ n.token_credential = _create_token_credential(cmd.cli_ctx)
+
+ if hasattr(n, 'token_credential') and n.token_credential:
+ # give warning if there are account key args being ignored
+ account_key_args = [n.account_key and "--account-key", n.sas_token and "--sas-token",
+ n.connection_string and "--connection-string"]
+ account_key_args = [arg for arg in account_key_args if arg]
+
+ if account_key_args:
+ logger.warning('In "login" auth mode, the following arguments are ignored: %s',
+ ' ,'.join(account_key_args))
+ return
if not n.connection_string:
- n.connection_string = get_config_value('storage', 'connection_string', None)
+ n.connection_string = get_config_value(cmd, 'storage', 'connection_string', None)
# if connection string supplied or in environment variables, extract account key and name
if n.connection_string:
conn_dict = validate_key_value_pairs(n.connection_string)
n.account_name = conn_dict.get('AccountName')
n.account_key = conn_dict.get('AccountKey')
- if not n.account_name or not n.account_key:
- raise CLIError('Connection-string: %s, is malformed. Some shell environments require the '
- 'connection string to be surrounded by quotes.' % n.connection_string)
+ n.sas_token = conn_dict.get('SharedAccessSignature')
# otherwise, simply try to retrieve the remaining variables from environment variables
if not n.account_name:
- n.account_name = get_config_value('storage', 'account', None)
+ n.account_name = get_config_value(cmd, 'storage', 'account', None)
if not n.account_key:
- n.account_key = get_config_value('storage', 'key', None)
+ n.account_key = get_config_value(cmd, 'storage', 'key', None)
if not n.sas_token:
- n.sas_token = get_config_value('storage', 'sas_token', None)
+ n.sas_token = get_config_value(cmd, 'storage', 'sas_token', None)
# strip the '?' from sas token. the portal and command line are returns sas token in different
# forms
if n.sas_token:
n.sas_token = n.sas_token.lstrip('?')
+ # account name with secondary
+ if n.account_name and n.account_name.endswith('-secondary'):
+ n.location_mode = 'secondary'
+ n.account_name = n.account_name[:-10]
+
# if account name is specified but no key, attempt to query
if n.account_name and not n.account_key and not n.sas_token:
+        logger.warning('There are no credentials provided in your command and environment, we will query the '
+                       'account key for your storage account. \nPlease provide --connection-string, '
+ '--account-key or --sas-token as credentials, or use `--auth-mode login` if you '
+ 'have required RBAC roles in your command. For more information about RBAC roles '
+ 'in storage, visit '
+ 'https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli. \n'
+ 'Setting the corresponding environment variables can avoid inputting credentials in '
+ 'your command. Please use --help to get more information.')
n.account_key = _query_account_key(cmd.cli_ctx, n.account_name)
diff --git a/src/storage-preview/azext_storage_preview/commands.py b/src/storage-preview/azext_storage_preview/commands.py
index 5182560d391..d68777fef1c 100644
--- a/src/storage-preview/azext_storage_preview/commands.py
+++ b/src/storage-preview/azext_storage_preview/commands.py
@@ -7,9 +7,10 @@
from azure.cli.core.commands.arm import show_exception_handler
from ._client_factory import (cf_sa, cf_blob_data_gen_update,
blob_data_service_factory, adls_blob_data_service_factory,
- cf_sa_blob_inventory, cf_mgmt_file_services, cf_share_client, cf_share_file_client)
+ cf_sa_blob_inventory, cf_mgmt_file_services, cf_share_client, cf_share_file_client,
+ cf_adls_service, cf_adls_file_system)
from .profiles import (CUSTOM_DATA_STORAGE, CUSTOM_DATA_STORAGE_ADLS, CUSTOM_MGMT_PREVIEW_STORAGE,
- CUSTOM_DATA_STORAGE_FILESHARE)
+ CUSTOM_DATA_STORAGE_FILESHARE, CUSTOM_DATA_STORAGE_FILEDATALAKE)
def load_command_table(self, _): # pylint: disable=too-many-locals, too-many-statements
@@ -205,3 +206,26 @@ def _adls_deprecate_message(self):
g.storage_custom_command('upload', 'storage_file_upload', transform=transform_file_upload)
g.storage_custom_command('upload-batch', 'storage_file_upload_batch',
custom_command_type=get_custom_sdk('file', client_factory=cf_share_client))
+
+ adls_fs_service_sdk = CliCommandType(
+ operations_tmpl='azext_storage_preview.vendored_sdks.azure_storage_filedatalake._data_lake_service_client#DataLakeServiceClient.{}',
+ client_factory=cf_adls_service,
+ resource_type=CUSTOM_DATA_STORAGE_FILEDATALAKE
+ )
+
+ with self.command_group('storage fs service-properties', command_type=adls_fs_service_sdk,
+ custom_command_type=get_custom_sdk('filesystem', cf_adls_service),
+ resource_type=CUSTOM_DATA_STORAGE_FILEDATALAKE, min_api='2020-06-12', is_preview=True) as g:
+ g.storage_command_oauth('show', 'get_service_properties', exception_handler=show_exception_handler)
+ g.storage_custom_command_oauth('update', 'set_service_properties')
+
+ adls_fs_sdk = CliCommandType(
+ operations_tmpl='azext_storage_preview.vendored_sdks.azure_storage_filedatalake._file_system_client#FileSystemClient.{}',
+ client_factory=cf_adls_file_system,
+ resource_type=CUSTOM_DATA_STORAGE_FILEDATALAKE
+ )
+ with self.command_group('storage fs', command_type=adls_fs_sdk,
+ custom_command_type=get_custom_sdk('filesystem', cf_adls_file_system),
+ resource_type=CUSTOM_DATA_STORAGE_FILEDATALAKE, min_api='2020-06-12', is_preview=True) as g:
+ g.storage_custom_command_oauth('list-deleted-path', 'list_deleted_path')
+ g.storage_command_oauth('undelete-path', '_undelete_path')
diff --git a/src/storage-preview/azext_storage_preview/operations/filesystem.py b/src/storage-preview/azext_storage_preview/operations/filesystem.py
new file mode 100644
index 00000000000..39663ca23cd
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/operations/filesystem.py
@@ -0,0 +1,61 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+from knack.util import CLIError
+from ..profiles import CUSTOM_DATA_STORAGE_FILEDATALAKE
+
+
+def list_deleted_path(client, marker=None, num_results=None, path_prefix=None, timeout=None, **kwargs):
+ from ..track2_util import list_generator
+
+ generator = client.list_deleted_paths(path_prefix=path_prefix, timeout=timeout, max_results=num_results, **kwargs)
+
+    pages = generator.by_page(continuation_token=marker)  # pages of deleted path properties
+ result = list_generator(pages=pages, num_results=num_results)
+
+ return result
+
+
+def set_service_properties(cmd, client, delete_retention=None, delete_retention_period=None,
+ enable_static_website=False, index_document=None, error_document_404_path=None):
+ parameters = client.get_service_properties()
+ # update
+ kwargs = {}
+ delete_retention_policy = cmd.get_models('_models#RetentionPolicy', resource_type=CUSTOM_DATA_STORAGE_FILEDATALAKE)()
+ if parameters.get('delete_retention_policy', None):
+ delete_retention_policy = parameters['delete_retention_policy']
+ if delete_retention is not None:
+ delete_retention_policy.enabled = delete_retention
+ if delete_retention_period is not None:
+ delete_retention_policy.days = delete_retention_period
+ delete_retention_policy.allow_permanent_delete = False
+
+ static_website = cmd.get_models('_models#StaticWebsite', resource_type=CUSTOM_DATA_STORAGE_FILEDATALAKE)()
+ if parameters.get('static_website', None):
+ static_website = parameters['static_website']
+
+ if static_website is not None:
+ static_website.enabled = enable_static_website
+ if index_document is not None:
+ static_website.index_document = index_document
+ if error_document_404_path is not None:
+ static_website.error_document_404_path = error_document_404_path
+
+ if parameters.get('hour_metrics', None):
+ kwargs['hour_metrics'] = parameters['hour_metrics']
+ if parameters.get('logging', None):
+ kwargs['logging'] = parameters['logging']
+ if parameters.get('minute_metrics', None):
+ kwargs['minute_metrics'] = parameters['minute_metrics']
+ if parameters.get('cors', None):
+ kwargs['cors'] = parameters['cors']
+
+ # checks
+ if delete_retention_policy and delete_retention_policy.enabled and not delete_retention_policy.days:
+        raise CLIError("must specify --delete-retention-period when enabling delete retention")
+
+ client.set_service_properties(delete_retention_policy=delete_retention_policy, static_website=static_website,
+ **kwargs)
+ return client.get_service_properties()
diff --git a/src/storage-preview/azext_storage_preview/profiles.py b/src/storage-preview/azext_storage_preview/profiles.py
index e6f1fb57d6f..d7d214a190d 100644
--- a/src/storage-preview/azext_storage_preview/profiles.py
+++ b/src/storage-preview/azext_storage_preview/profiles.py
@@ -12,3 +12,5 @@
'StorageManagementClient')
CUSTOM_DATA_STORAGE_FILESHARE = CustomResourceType('azext_storage_preview.vendored_sdks.azure_storagev2.fileshare',
None)
+CUSTOM_DATA_STORAGE_FILEDATALAKE = CustomResourceType('azext_storage_preview.vendored_sdks.azure_storage_filedatalake',
+ None)
diff --git a/src/storage-preview/azext_storage_preview/tests/latest/__init__.py b/src/storage-preview/azext_storage_preview/tests/latest/__init__.py
index 69793457c2a..4173b34be53 100644
--- a/src/storage-preview/azext_storage_preview/tests/latest/__init__.py
+++ b/src/storage-preview/azext_storage_preview/tests/latest/__init__.py
@@ -4,5 +4,6 @@
# --------------------------------------------------------------------------------------------
from azure.cli.core.profiles import register_resource_type
-from ...profiles import CUSTOM_MGMT_PREVIEW_STORAGE
+from ...profiles import CUSTOM_MGMT_PREVIEW_STORAGE, CUSTOM_DATA_STORAGE_FILEDATALAKE
register_resource_type('latest', CUSTOM_MGMT_PREVIEW_STORAGE, '2020-08-01-preview')
+register_resource_type('latest', CUSTOM_DATA_STORAGE_FILEDATALAKE, '2020-06-12')
diff --git a/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_account_blob_inventory_policy.yaml b/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_account_blob_inventory_policy.yaml
index 2147aaefda1..ae3b76b687f 100644
--- a/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_account_blob_inventory_policy.yaml
+++ b/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_account_blob_inventory_policy.yaml
@@ -15,12 +15,12 @@ interactions:
ParameterSetName:
- -n -g --query -o
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/17.1.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: POST
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/listKeys?api-version=2021-02-01&$expand=kerb
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/listKeys?api-version=2021-04-01&$expand=kerb
response:
body:
- string: '{"keys":[{"creationTime":"2021-04-28T08:46:15.4646357Z","keyName":"key1","value":"veryFakedStorageAccountKey==","permissions":"FULL"},{"creationTime":"2021-04-28T08:46:15.4646357Z","keyName":"key2","value":"veryFakedStorageAccountKey==","permissions":"FULL"}]}'
+ string: '{"keys":[{"creationTime":"2021-05-20T06:29:43.9323587Z","keyName":"key1","value":"veryFakedStorageAccountKey==","permissions":"FULL"},{"creationTime":"2021-05-20T06:29:43.9323587Z","keyName":"key2","value":"veryFakedStorageAccountKey==","permissions":"FULL"}]}'
headers:
cache-control:
- no-cache
@@ -29,7 +29,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:46:36 GMT
+ - Thu, 20 May 2021 06:30:05 GMT
expires:
- '-1'
pragma:
@@ -45,7 +45,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-resource-requests:
- - '11999'
+ - '11997'
status:
code: 200
message: OK
@@ -57,9 +57,9 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:37 GMT
+ - Thu, 20 May 2021 06:30:06 GMT
x-ms-version:
- '2018-11-09'
method: PUT
@@ -71,11 +71,11 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:30:08 GMT
etag:
- - '"0x8D90A222473B17B"'
+ - '"0x8D91B58B743140F"'
last-modified:
- - Wed, 28 Apr 2021 08:46:40 GMT
+ - Thu, 20 May 2021 06:30:09 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-version:
@@ -103,21 +103,21 @@ interactions:
ParameterSetName:
- --account-name -g --policy
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PUT
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default?api-version=2020-08-01-preview
response:
body:
- string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default","name":"DefaultInventoryPolicy","type":"Microsoft.Storage/storageAccounts/inventoryPolicies","properties":{"lastModifiedTime":"2021-04-28T08:46:42.3233993Z","policy":{"enabled":true,"type":"Inventory","destination":"mycontainer","rules":[{"enabled":true,"name":"inventoryPolicyRule1","definition":{"filters":{"blobTypes":["blockBlob"]}}}]}}}'
+ string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default","name":"DefaultInventoryPolicy","type":"Microsoft.Storage/storageAccounts/inventoryPolicies","properties":{"lastModifiedTime":"2021-05-20T06:30:11.3234716Z","policy":{"enabled":true,"type":"Inventory","destination":"mycontainer","rules":[{"enabled":true,"name":"inventoryPolicyRule1","definition":{"filters":{"blobTypes":["blockBlob"],"prefixMatch":[]}}}]}}}'
headers:
cache-control:
- no-cache
content-length:
- - '587'
+ - '604'
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:46:42 GMT
+ - Thu, 20 May 2021 06:30:11 GMT
expires:
- '-1'
pragma:
@@ -133,7 +133,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1196'
+ - '1197'
status:
code: 200
message: OK
@@ -151,21 +151,21 @@ interactions:
ParameterSetName:
- --account-name -g
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default?api-version=2020-08-01-preview
response:
body:
- string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default","name":"DefaultInventoryPolicy","type":"Microsoft.Storage/storageAccounts/inventoryPolicies","properties":{"lastModifiedTime":"2021-04-28T08:46:42.3233993Z","policy":{"enabled":true,"type":"Inventory","destination":"mycontainer","rules":[{"enabled":true,"name":"inventoryPolicyRule1","definition":{"filters":{"blobTypes":["blockBlob"]}}}]}}}'
+ string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default","name":"DefaultInventoryPolicy","type":"Microsoft.Storage/storageAccounts/inventoryPolicies","properties":{"lastModifiedTime":"2021-05-20T06:30:11.3234716Z","policy":{"enabled":true,"type":"Inventory","destination":"mycontainer","rules":[{"enabled":true,"name":"inventoryPolicyRule1","definition":{"filters":{"blobTypes":["blockBlob"],"prefixMatch":[]}}}]}}}'
headers:
cache-control:
- no-cache
content-length:
- - '587'
+ - '604'
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:46:43 GMT
+ - Thu, 20 May 2021 06:30:13 GMT
expires:
- '-1'
pragma:
@@ -197,9 +197,9 @@ interactions:
ParameterSetName:
- -n -g --enable-versioning
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/17.1.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/blobServices/default?api-version=2021-02-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/blobServices/default?api-version=2021-04-01
response:
body:
string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/blobServices/default","name":"default","type":"Microsoft.Storage/storageAccounts/blobServices","properties":{"cors":{"corsRules":[]},"deleteRetentionPolicy":{"enabled":false}}}'
@@ -211,7 +211,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:46:44 GMT
+ - Thu, 20 May 2021 06:30:14 GMT
expires:
- '-1'
pragma:
@@ -248,9 +248,9 @@ interactions:
ParameterSetName:
- -n -g --enable-versioning
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/17.1.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PUT
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/blobServices/default?api-version=2021-02-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/blobServices/default?api-version=2021-04-01
response:
body:
string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/blobServices/default","name":"default","type":"Microsoft.Storage/storageAccounts/blobServices","properties":{"cors":{"corsRules":[]},"deleteRetentionPolicy":{"enabled":false},"isVersioningEnabled":true}}'
@@ -262,7 +262,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:46:44 GMT
+ - Thu, 20 May 2021 06:30:15 GMT
expires:
- '-1'
pragma:
@@ -278,7 +278,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1197'
+ - '1196'
status:
code: 200
message: OK
@@ -304,12 +304,12 @@ interactions:
ParameterSetName:
- --account-name -g --policy
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PUT
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default?api-version=2020-08-01-preview
response:
body:
- string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default","name":"DefaultInventoryPolicy","type":"Microsoft.Storage/storageAccounts/inventoryPolicies","properties":{"lastModifiedTime":"2021-04-28T08:46:46.2235568Z","policy":{"enabled":true,"type":"Inventory","destination":"mycontainer","rules":[{"enabled":true,"name":"inventoryPolicyRule1","definition":{"filters":{"blobTypes":["blockBlob"],"prefixMatch":["inventoryprefix1","inventoryprefix2"],"includeBlobVersions":true,"includeSnapshots":true}}}]}}}'
+ string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default","name":"DefaultInventoryPolicy","type":"Microsoft.Storage/storageAccounts/inventoryPolicies","properties":{"lastModifiedTime":"2021-05-20T06:30:17.5361977Z","policy":{"enabled":true,"type":"Inventory","destination":"mycontainer","rules":[{"enabled":true,"name":"inventoryPolicyRule1","definition":{"filters":{"blobTypes":["blockBlob"],"prefixMatch":["inventoryprefix1","inventoryprefix2"],"includeBlobVersions":true,"includeSnapshots":true}}}]}}}'
headers:
cache-control:
- no-cache
@@ -318,7 +318,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:46:46 GMT
+ - Thu, 20 May 2021 06:30:17 GMT
expires:
- '-1'
pragma:
@@ -352,12 +352,12 @@ interactions:
ParameterSetName:
- --account-name -g
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default?api-version=2020-08-01-preview
response:
body:
- string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default","name":"DefaultInventoryPolicy","type":"Microsoft.Storage/storageAccounts/inventoryPolicies","properties":{"lastModifiedTime":"2021-04-28T08:46:46.2235568Z","policy":{"enabled":true,"type":"Inventory","destination":"mycontainer","rules":[{"enabled":true,"name":"inventoryPolicyRule1","definition":{"filters":{"blobTypes":["blockBlob"],"prefixMatch":["inventoryprefix1","inventoryprefix2"],"includeBlobVersions":true,"includeSnapshots":true}}}]}}}'
+ string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default","name":"DefaultInventoryPolicy","type":"Microsoft.Storage/storageAccounts/inventoryPolicies","properties":{"lastModifiedTime":"2021-05-20T06:30:17.5361977Z","policy":{"enabled":true,"type":"Inventory","destination":"mycontainer","rules":[{"enabled":true,"name":"inventoryPolicyRule1","definition":{"filters":{"blobTypes":["blockBlob"],"prefixMatch":["inventoryprefix1","inventoryprefix2"],"includeBlobVersions":true,"includeSnapshots":true}}}]}}}'
headers:
cache-control:
- no-cache
@@ -366,7 +366,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:46:47 GMT
+ - Thu, 20 May 2021 06:30:18 GMT
expires:
- '-1'
pragma:
@@ -398,12 +398,12 @@ interactions:
ParameterSetName:
- --account-name -g --set
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default?api-version=2020-08-01-preview
response:
body:
- string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default","name":"DefaultInventoryPolicy","type":"Microsoft.Storage/storageAccounts/inventoryPolicies","properties":{"lastModifiedTime":"2021-04-28T08:46:46.2235568Z","policy":{"enabled":true,"type":"Inventory","destination":"mycontainer","rules":[{"enabled":true,"name":"inventoryPolicyRule1","definition":{"filters":{"blobTypes":["blockBlob"],"prefixMatch":["inventoryprefix1","inventoryprefix2"],"includeBlobVersions":true,"includeSnapshots":true}}}]}}}'
+ string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default","name":"DefaultInventoryPolicy","type":"Microsoft.Storage/storageAccounts/inventoryPolicies","properties":{"lastModifiedTime":"2021-05-20T06:30:17.5361977Z","policy":{"enabled":true,"type":"Inventory","destination":"mycontainer","rules":[{"enabled":true,"name":"inventoryPolicyRule1","definition":{"filters":{"blobTypes":["blockBlob"],"prefixMatch":["inventoryprefix1","inventoryprefix2"],"includeBlobVersions":true,"includeSnapshots":true}}}]}}}'
headers:
cache-control:
- no-cache
@@ -412,7 +412,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:46:48 GMT
+ - Thu, 20 May 2021 06:30:19 GMT
expires:
- '-1'
pragma:
@@ -451,12 +451,12 @@ interactions:
ParameterSetName:
- --account-name -g --set
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PUT
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default?api-version=2020-08-01-preview
response:
body:
- string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default","name":"DefaultInventoryPolicy","type":"Microsoft.Storage/storageAccounts/inventoryPolicies","properties":{"lastModifiedTime":"2021-04-28T08:46:50.0893480Z","policy":{"enabled":true,"type":"Inventory","destination":"mycontainer","rules":[{"enabled":true,"name":"newname","definition":{"filters":{"blobTypes":["blockBlob"],"prefixMatch":["inventoryprefix1","inventoryprefix2"],"includeBlobVersions":true,"includeSnapshots":true}}}]}}}'
+ string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default","name":"DefaultInventoryPolicy","type":"Microsoft.Storage/storageAccounts/inventoryPolicies","properties":{"lastModifiedTime":"2021-05-20T06:30:21.7466235Z","policy":{"enabled":true,"type":"Inventory","destination":"mycontainer","rules":[{"enabled":true,"name":"newname","definition":{"filters":{"blobTypes":["blockBlob"],"prefixMatch":["inventoryprefix1","inventoryprefix2"],"includeBlobVersions":true,"includeSnapshots":true}}}]}}}'
headers:
cache-control:
- no-cache
@@ -465,7 +465,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:46:49 GMT
+ - Thu, 20 May 2021 06:30:21 GMT
expires:
- '-1'
pragma:
@@ -499,12 +499,12 @@ interactions:
ParameterSetName:
- --account-name -g
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default?api-version=2020-08-01-preview
response:
body:
- string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default","name":"DefaultInventoryPolicy","type":"Microsoft.Storage/storageAccounts/inventoryPolicies","properties":{"lastModifiedTime":"2021-04-28T08:46:50.0893480Z","policy":{"enabled":true,"type":"Inventory","destination":"mycontainer","rules":[{"enabled":true,"name":"newname","definition":{"filters":{"blobTypes":["blockBlob"],"prefixMatch":["inventoryprefix1","inventoryprefix2"],"includeBlobVersions":true,"includeSnapshots":true}}}]}}}'
+ string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default","name":"DefaultInventoryPolicy","type":"Microsoft.Storage/storageAccounts/inventoryPolicies","properties":{"lastModifiedTime":"2021-05-20T06:30:21.7466235Z","policy":{"enabled":true,"type":"Inventory","destination":"mycontainer","rules":[{"enabled":true,"name":"newname","definition":{"filters":{"blobTypes":["blockBlob"],"prefixMatch":["inventoryprefix1","inventoryprefix2"],"includeBlobVersions":true,"includeSnapshots":true}}}]}}}'
headers:
cache-control:
- no-cache
@@ -513,7 +513,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:46:50 GMT
+ - Thu, 20 May 2021 06:30:23 GMT
expires:
- '-1'
pragma:
@@ -547,7 +547,7 @@ interactions:
ParameterSetName:
- --account-name -g -y
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: DELETE
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default?api-version=2020-08-01-preview
response:
@@ -561,7 +561,7 @@ interactions:
content-type:
- text/plain; charset=utf-8
date:
- - Wed, 28 Apr 2021 08:46:51 GMT
+ - Thu, 20 May 2021 06:30:23 GMT
expires:
- '-1'
pragma:
@@ -573,7 +573,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-deletes:
- - '14997'
+ - '14999'
status:
code: 200
message: OK
@@ -591,7 +591,7 @@ interactions:
ParameterSetName:
- --account-name -g
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_blob_inventory000001/providers/Microsoft.Storage/storageAccounts/clitest000002/inventoryPolicies/default?api-version=2020-08-01-preview
response:
@@ -606,7 +606,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:46:53 GMT
+ - Thu, 20 May 2021 06:30:25 GMT
expires:
- '-1'
pragma:
diff --git a/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_account_network_rules.yaml b/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_account_network_rules.yaml
index 5c49f9db0a9..afd4116a212 100644
--- a/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_account_network_rules.yaml
+++ b/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_account_network_rules.yaml
@@ -13,15 +13,12 @@ interactions:
ParameterSetName:
- -g -n --bypass --default-action --https-only
User-Agent:
- - python/3.7.7 (Windows-10-10.0.19041-SP0) msrest/0.6.21 msrest_azure/0.6.3
- azure-mgmt-resource/12.1.0 Azure-SDK-For-Python AZURECLI/2.22.1
- accept-language:
- - en-US
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-resource/16.1.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_storage_service_endpoints000001?api-version=2020-10-01
response:
body:
- string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001","name":"cli_test_storage_service_endpoints000001","type":"Microsoft.Resources/resourceGroups","location":"westus","tags":{"product":"azurecli","cause":"automation","date":"2021-04-28T08:57:31Z"},"properties":{"provisioningState":"Succeeded"}}'
+ string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001","name":"cli_test_storage_service_endpoints000001","type":"Microsoft.Resources/resourceGroups","location":"westus","tags":{"product":"azurecli","cause":"automation","date":"2021-05-20T08:16:43Z"},"properties":{"provisioningState":"Succeeded"}}'
headers:
cache-control:
- no-cache
@@ -30,7 +27,7 @@ interactions:
content-type:
- application/json; charset=utf-8
date:
- - Wed, 28 Apr 2021 08:58:01 GMT
+ - Thu, 20 May 2021 08:17:13 GMT
expires:
- '-1'
pragma:
@@ -65,9 +62,9 @@ interactions:
ParameterSetName:
- -g -n --bypass --default-action --https-only
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/17.1.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PUT
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2021-02-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2021-04-01
response:
body:
string: ''
@@ -79,11 +76,11 @@ interactions:
content-type:
- text/plain; charset=utf-8
date:
- - Wed, 28 Apr 2021 08:58:08 GMT
+ - Thu, 20 May 2021 08:17:19 GMT
expires:
- '-1'
location:
- - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Storage/locations/westus/asyncoperations/f40cc7b9-4320-47c2-b0d4-e9bb1c532c53?monitor=true&api-version=2021-02-01
+ - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Storage/locations/westus/asyncoperations/f5cd050c-d228-4e7a-be6c-b01d071d2589?monitor=true&api-version=2021-04-01
pragma:
- no-cache
server:
@@ -93,7 +90,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1194'
+ - '1199'
status:
code: 202
message: Accepted
@@ -111,12 +108,12 @@ interactions:
ParameterSetName:
- -g -n --bypass --default-action --https-only
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/17.1.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Storage/locations/westus/asyncoperations/f40cc7b9-4320-47c2-b0d4-e9bb1c532c53?monitor=true&api-version=2021-02-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Storage/locations/westus/asyncoperations/f5cd050c-d228-4e7a-be6c-b01d071d2589?monitor=true&api-version=2021-04-01
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"keyCreationTime":{"key1":"2021-04-28T08:58:06.5795218Z","key2":"2021-04-28T08:58:06.5795218Z"},"privateEndpointConnections":[],"networkAcls":{"bypass":"Metrics","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"keyCreationTime":{"key1":"2021-05-20T08:17:18.6257026Z","key2":"2021-05-20T08:17:18.6257026Z"},"privateEndpointConnections":[],"networkAcls":{"bypass":"Metrics","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -125,7 +122,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:25 GMT
+ - Thu, 20 May 2021 08:17:36 GMT
expires:
- '-1'
pragma:
@@ -157,12 +154,12 @@ interactions:
ParameterSetName:
- -g -n --bypass --default-action
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/17.1.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2021-02-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2021-04-01
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"keyCreationTime":{"key1":"2021-04-28T08:58:06.5795218Z","key2":"2021-04-28T08:58:06.5795218Z"},"privateEndpointConnections":[],"networkAcls":{"bypass":"Metrics","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"keyCreationTime":{"key1":"2021-05-20T08:17:18.6257026Z","key2":"2021-05-20T08:17:18.6257026Z"},"privateEndpointConnections":[],"networkAcls":{"bypass":"Metrics","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -171,7 +168,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:26 GMT
+ - Thu, 20 May 2021 08:17:39 GMT
expires:
- '-1'
pragma:
@@ -211,12 +208,12 @@ interactions:
ParameterSetName:
- -g -n --bypass --default-action
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/17.1.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PATCH
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2021-02-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2021-04-01
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"keyCreationTime":{"key1":"2021-04-28T08:58:06.5795218Z","key2":"2021-04-28T08:58:06.5795218Z"},"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"keyCreationTime":{"key1":"2021-05-20T08:17:18.6257026Z","key2":"2021-05-20T08:17:18.6257026Z"},"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -225,7 +222,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:28 GMT
+ - Thu, 20 May 2021 08:17:43 GMT
expires:
- '-1'
pragma:
@@ -241,7 +238,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1194'
+ - '1199'
status:
code: 200
message: OK
@@ -259,12 +256,12 @@ interactions:
ParameterSetName:
- -g -n --set
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/17.1.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2021-02-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2021-04-01
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"keyCreationTime":{"key1":"2021-04-28T08:58:06.5795218Z","key2":"2021-04-28T08:58:06.5795218Z"},"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"keyCreationTime":{"key1":"2021-05-20T08:17:18.6257026Z","key2":"2021-05-20T08:17:18.6257026Z"},"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -273,7 +270,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:29 GMT
+ - Thu, 20 May 2021 08:17:45 GMT
expires:
- '-1'
pragma:
@@ -313,12 +310,12 @@ interactions:
ParameterSetName:
- -g -n --set
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/17.1.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PATCH
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2021-02-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2021-04-01
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"keyCreationTime":{"key1":"2021-04-28T08:58:06.5795218Z","key2":"2021-04-28T08:58:06.5795218Z"},"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"keyCreationTime":{"key1":"2021-05-20T08:17:18.6257026Z","key2":"2021-05-20T08:17:18.6257026Z"},"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -327,7 +324,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:32 GMT
+ - Thu, 20 May 2021 08:17:49 GMT
expires:
- '-1'
pragma:
@@ -343,7 +340,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1193'
+ - '1198'
status:
code: 200
message: OK
@@ -361,15 +358,12 @@ interactions:
ParameterSetName:
- -g -n --subnet-name
User-Agent:
- - python/3.7.7 (Windows-10-10.0.19041-SP0) msrest/0.6.21 msrest_azure/0.6.3
- azure-mgmt-resource/12.1.0 Azure-SDK-For-Python AZURECLI/2.22.1
- accept-language:
- - en-US
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-resource/16.1.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_storage_service_endpoints000001?api-version=2020-10-01
response:
body:
- string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001","name":"cli_test_storage_service_endpoints000001","type":"Microsoft.Resources/resourceGroups","location":"westus","tags":{"product":"azurecli","cause":"automation","date":"2021-04-28T08:57:31Z"},"properties":{"provisioningState":"Succeeded"}}'
+ string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001","name":"cli_test_storage_service_endpoints000001","type":"Microsoft.Resources/resourceGroups","location":"westus","tags":{"product":"azurecli","cause":"automation","date":"2021-05-20T08:16:43Z"},"properties":{"provisioningState":"Succeeded"}}'
headers:
cache-control:
- no-cache
@@ -378,7 +372,7 @@ interactions:
content-type:
- application/json; charset=utf-8
date:
- - Wed, 28 Apr 2021 08:58:33 GMT
+ - Thu, 20 May 2021 08:17:51 GMT
expires:
- '-1'
pragma:
@@ -395,7 +389,8 @@ interactions:
- request:
body: '{"location": "westus", "tags": {}, "properties": {"addressSpace": {"addressPrefixes":
["10.0.0.0/16"]}, "dhcpOptions": {}, "subnets": [{"name": "subnet1", "properties":
- {"addressPrefix": "10.0.0.0/24"}}]}}'
+ {"addressPrefix": "10.0.0.0/24", "privateEndpointNetworkPolicies": "Enabled",
+ "privateLinkServiceNetworkPolicies": "Enabled"}}]}}'
headers:
Accept:
- application/json
@@ -406,27 +401,27 @@ interactions:
Connection:
- keep-alive
Content-Length:
- - '205'
+ - '298'
Content-Type:
- application/json
ParameterSetName:
- -g -n --subnet-name
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-network/18.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-network/19.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PUT
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1?api-version=2020-11-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1?api-version=2021-02-01
response:
body:
string: "{\r\n \"name\": \"vnet1\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1\",\r\n
- \ \"etag\": \"W/\\\"34e76dfc-b03b-48d5-a2dd-29c54e8f3633\\\"\",\r\n \"type\":
+ \ \"etag\": \"W/\\\"428d700e-c45c-4d82-93b3-2c2e2ce197d9\\\"\",\r\n \"type\":
\"Microsoft.Network/virtualNetworks\",\r\n \"location\": \"westus\",\r\n
\ \"tags\": {},\r\n \"properties\": {\r\n \"provisioningState\": \"Updating\",\r\n
- \ \"resourceGuid\": \"830744c6-bfc9-4f79-868b-2c5ecbbf44c5\",\r\n \"addressSpace\":
+ \ \"resourceGuid\": \"dc4731c4-ca21-4718-bac9-e97ba9ed8c9e\",\r\n \"addressSpace\":
{\r\n \"addressPrefixes\": [\r\n \"10.0.0.0/16\"\r\n ]\r\n
\ },\r\n \"dhcpOptions\": {\r\n \"dnsServers\": []\r\n },\r\n
\ \"subnets\": [\r\n {\r\n \"name\": \"subnet1\",\r\n \"id\":
\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1\",\r\n
- \ \"etag\": \"W/\\\"34e76dfc-b03b-48d5-a2dd-29c54e8f3633\\\"\",\r\n
+ \ \"etag\": \"W/\\\"428d700e-c45c-4d82-93b3-2c2e2ce197d9\\\"\",\r\n
\ \"properties\": {\r\n \"provisioningState\": \"Updating\",\r\n
\ \"addressPrefix\": \"10.0.0.0/24\",\r\n \"delegations\":
[],\r\n \"privateEndpointNetworkPolicies\": \"Enabled\",\r\n \"privateLinkServiceNetworkPolicies\":
@@ -437,7 +432,7 @@ interactions:
azure-asyncnotification:
- Enabled
azure-asyncoperation:
- - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Network/locations/westus/operations/a442b174-af75-4d4f-a55a-4545a6c91300?api-version=2020-11-01
+ - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Network/locations/westus/operations/e2f965f4-60b0-4297-a1bf-923c43e63d78?api-version=2021-02-01
cache-control:
- no-cache
content-length:
@@ -445,7 +440,7 @@ interactions:
content-type:
- application/json; charset=utf-8
date:
- - Wed, 28 Apr 2021 08:58:39 GMT
+ - Thu, 20 May 2021 08:17:57 GMT
expires:
- '-1'
pragma:
@@ -458,9 +453,9 @@ interactions:
x-content-type-options:
- nosniff
x-ms-arm-service-request-id:
- - ed585475-6787-4863-81e5-38bc1d1ce041
+ - 517ccb35-fcfa-4e97-8165-630f36c1915a
x-ms-ratelimit-remaining-subscription-writes:
- - '1197'
+ - '1198'
status:
code: 201
message: Created
@@ -478,9 +473,9 @@ interactions:
ParameterSetName:
- -g -n --subnet-name
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-network/18.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-network/19.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Network/locations/westus/operations/a442b174-af75-4d4f-a55a-4545a6c91300?api-version=2020-11-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Network/locations/westus/operations/e2f965f4-60b0-4297-a1bf-923c43e63d78?api-version=2021-02-01
response:
body:
string: "{\r\n \"status\": \"Succeeded\"\r\n}"
@@ -492,7 +487,7 @@ interactions:
content-type:
- application/json; charset=utf-8
date:
- - Wed, 28 Apr 2021 08:58:42 GMT
+ - Thu, 20 May 2021 08:18:00 GMT
expires:
- '-1'
pragma:
@@ -509,7 +504,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-arm-service-request-id:
- - e1a7ff6d-bb00-4aee-b4f0-2c4f0d260857
+ - a2309b38-2c43-4790-89eb-7dd881616486
status:
code: 200
message: OK
@@ -527,21 +522,21 @@ interactions:
ParameterSetName:
- -g -n --subnet-name
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-network/18.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-network/19.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1?api-version=2020-11-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1?api-version=2021-02-01
response:
body:
string: "{\r\n \"name\": \"vnet1\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1\",\r\n
- \ \"etag\": \"W/\\\"3caca246-35df-4268-8b50-a28e90d33bd3\\\"\",\r\n \"type\":
+ \ \"etag\": \"W/\\\"164bfe00-67f4-45d2-8568-5ed59b255b71\\\"\",\r\n \"type\":
\"Microsoft.Network/virtualNetworks\",\r\n \"location\": \"westus\",\r\n
\ \"tags\": {},\r\n \"properties\": {\r\n \"provisioningState\": \"Succeeded\",\r\n
- \ \"resourceGuid\": \"830744c6-bfc9-4f79-868b-2c5ecbbf44c5\",\r\n \"addressSpace\":
+ \ \"resourceGuid\": \"dc4731c4-ca21-4718-bac9-e97ba9ed8c9e\",\r\n \"addressSpace\":
{\r\n \"addressPrefixes\": [\r\n \"10.0.0.0/16\"\r\n ]\r\n
\ },\r\n \"dhcpOptions\": {\r\n \"dnsServers\": []\r\n },\r\n
\ \"subnets\": [\r\n {\r\n \"name\": \"subnet1\",\r\n \"id\":
\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1\",\r\n
- \ \"etag\": \"W/\\\"3caca246-35df-4268-8b50-a28e90d33bd3\\\"\",\r\n
+ \ \"etag\": \"W/\\\"164bfe00-67f4-45d2-8568-5ed59b255b71\\\"\",\r\n
\ \"properties\": {\r\n \"provisioningState\": \"Succeeded\",\r\n
\ \"addressPrefix\": \"10.0.0.0/24\",\r\n \"delegations\":
[],\r\n \"privateEndpointNetworkPolicies\": \"Enabled\",\r\n \"privateLinkServiceNetworkPolicies\":
@@ -556,9 +551,9 @@ interactions:
content-type:
- application/json; charset=utf-8
date:
- - Wed, 28 Apr 2021 08:58:43 GMT
+ - Thu, 20 May 2021 08:18:00 GMT
etag:
- - W/"3caca246-35df-4268-8b50-a28e90d33bd3"
+ - W/"164bfe00-67f4-45d2-8568-5ed59b255b71"
expires:
- '-1'
pragma:
@@ -575,7 +570,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-arm-service-request-id:
- - 647bc434-6a85-46d6-b599-c857dac607a9
+ - e04a1f63-f04d-41db-88f1-e52f3ccb579e
status:
code: 200
message: OK
@@ -593,13 +588,13 @@ interactions:
ParameterSetName:
- -g --vnet-name -n --service-endpoints
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-network/18.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-network/19.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1?api-version=2020-11-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1?api-version=2021-02-01
response:
body:
string: "{\r\n \"name\": \"subnet1\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1\",\r\n
- \ \"etag\": \"W/\\\"3caca246-35df-4268-8b50-a28e90d33bd3\\\"\",\r\n \"properties\":
+ \ \"etag\": \"W/\\\"164bfe00-67f4-45d2-8568-5ed59b255b71\\\"\",\r\n \"properties\":
{\r\n \"provisioningState\": \"Succeeded\",\r\n \"addressPrefix\": \"10.0.0.0/24\",\r\n
\ \"delegations\": [],\r\n \"privateEndpointNetworkPolicies\": \"Enabled\",\r\n
\ \"privateLinkServiceNetworkPolicies\": \"Enabled\"\r\n },\r\n \"type\":
@@ -612,9 +607,9 @@ interactions:
content-type:
- application/json; charset=utf-8
date:
- - Wed, 28 Apr 2021 08:58:43 GMT
+ - Thu, 20 May 2021 08:18:01 GMT
etag:
- - W/"3caca246-35df-4268-8b50-a28e90d33bd3"
+ - W/"164bfe00-67f4-45d2-8568-5ed59b255b71"
expires:
- '-1'
pragma:
@@ -631,15 +626,16 @@ interactions:
x-content-type-options:
- nosniff
x-ms-arm-service-request-id:
- - c475c16b-c3e7-470f-9c94-016fb862bed2
+ - be87d9f3-7b35-4d57-80d7-27fc2edc8cee
status:
code: 200
message: OK
- request:
body: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1",
- "name": "subnet1", "properties": {"addressPrefix": "10.0.0.0/24", "serviceEndpoints":
- [{"service": "Microsoft.Storage"}], "delegations": [], "privateEndpointNetworkPolicies":
- "Enabled", "privateLinkServiceNetworkPolicies": "Enabled"}}'
+ "name": "subnet1", "type": "Microsoft.Network/virtualNetworks/subnets", "properties":
+ {"addressPrefix": "10.0.0.0/24", "serviceEndpoints": [{"service": "Microsoft.Storage"}],
+ "delegations": [], "privateEndpointNetworkPolicies": "Enabled", "privateLinkServiceNetworkPolicies":
+ "Enabled"}}'
headers:
Accept:
- application/json
@@ -650,19 +646,19 @@ interactions:
Connection:
- keep-alive
Content-Length:
- - '453'
+ - '506'
Content-Type:
- application/json
ParameterSetName:
- -g --vnet-name -n --service-endpoints
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-network/18.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-network/19.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PUT
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1?api-version=2020-11-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1?api-version=2021-02-01
response:
body:
string: "{\r\n \"name\": \"subnet1\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1\",\r\n
- \ \"etag\": \"W/\\\"3a637d35-fd18-43c6-b2e3-5a9c77a381cf\\\"\",\r\n \"properties\":
+ \ \"etag\": \"W/\\\"8f6af4f9-435a-4e15-997c-8158a5afc6b2\\\"\",\r\n \"properties\":
{\r\n \"provisioningState\": \"Updating\",\r\n \"addressPrefix\": \"10.0.0.0/24\",\r\n
\ \"serviceEndpoints\": [\r\n {\r\n \"provisioningState\": \"Updating\",\r\n
\ \"service\": \"Microsoft.Storage\",\r\n \"locations\": [\r\n
@@ -672,7 +668,7 @@ interactions:
\ },\r\n \"type\": \"Microsoft.Network/virtualNetworks/subnets\"\r\n}"
headers:
azure-asyncoperation:
- - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Network/locations/westus/operations/a51bf41c-60c2-4436-a514-b7ddfcec5929?api-version=2020-11-01
+ - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Network/locations/westus/operations/1dabde99-cee9-4457-8751-d30407e10363?api-version=2021-02-01
cache-control:
- no-cache
content-length:
@@ -680,7 +676,7 @@ interactions:
content-type:
- application/json; charset=utf-8
date:
- - Wed, 28 Apr 2021 08:58:43 GMT
+ - Thu, 20 May 2021 08:18:02 GMT
expires:
- '-1'
pragma:
@@ -697,9 +693,9 @@ interactions:
x-content-type-options:
- nosniff
x-ms-arm-service-request-id:
- - d7640dc0-d7c2-4762-a0be-e8db4c078b90
+ - d0969b21-4b7f-4714-9fbd-0972afeca4f0
x-ms-ratelimit-remaining-subscription-writes:
- - '1192'
+ - '1199'
status:
code: 200
message: OK
@@ -717,9 +713,9 @@ interactions:
ParameterSetName:
- -g --vnet-name -n --service-endpoints
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-network/18.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-network/19.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Network/locations/westus/operations/a51bf41c-60c2-4436-a514-b7ddfcec5929?api-version=2020-11-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Network/locations/westus/operations/1dabde99-cee9-4457-8751-d30407e10363?api-version=2021-02-01
response:
body:
string: "{\r\n \"status\": \"Succeeded\"\r\n}"
@@ -731,7 +727,7 @@ interactions:
content-type:
- application/json; charset=utf-8
date:
- - Wed, 28 Apr 2021 08:58:47 GMT
+ - Thu, 20 May 2021 08:18:05 GMT
expires:
- '-1'
pragma:
@@ -748,7 +744,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-arm-service-request-id:
- - 1679ba27-9f50-4125-b8f0-b9d00edd73ea
+ - 1931ad7d-a1b9-461d-8bd8-cba797ce0125
status:
code: 200
message: OK
@@ -766,13 +762,13 @@ interactions:
ParameterSetName:
- -g --vnet-name -n --service-endpoints
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-network/18.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-network/19.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1?api-version=2020-11-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1?api-version=2021-02-01
response:
body:
string: "{\r\n \"name\": \"subnet1\",\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1\",\r\n
- \ \"etag\": \"W/\\\"58da0849-9d5e-4210-8e39-04ce32b8cecd\\\"\",\r\n \"properties\":
+ \ \"etag\": \"W/\\\"90ccc9b8-fed5-4820-8ad3-3c2e6a8181b9\\\"\",\r\n \"properties\":
{\r\n \"provisioningState\": \"Succeeded\",\r\n \"addressPrefix\": \"10.0.0.0/24\",\r\n
\ \"serviceEndpoints\": [\r\n {\r\n \"provisioningState\": \"Succeeded\",\r\n
\ \"service\": \"Microsoft.Storage\",\r\n \"locations\": [\r\n
@@ -788,9 +784,9 @@ interactions:
content-type:
- application/json; charset=utf-8
date:
- - Wed, 28 Apr 2021 08:58:48 GMT
+ - Thu, 20 May 2021 08:18:06 GMT
etag:
- - W/"58da0849-9d5e-4210-8e39-04ce32b8cecd"
+ - W/"90ccc9b8-fed5-4820-8ad3-3c2e6a8181b9"
expires:
- '-1'
pragma:
@@ -807,7 +803,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-arm-service-request-id:
- - b29a4da0-25d1-458f-a3bc-babdffd50fcf
+ - c80e7490-8240-41d3-8119-fa68d7113dfc
status:
code: 200
message: OK
@@ -825,12 +821,12 @@ interactions:
ParameterSetName:
- -g --account-name --ip-address
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -839,7 +835,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:49 GMT
+ - Thu, 20 May 2021 08:18:08 GMT
expires:
- '-1'
pragma:
@@ -877,12 +873,12 @@ interactions:
ParameterSetName:
- -g --account-name --ip-address
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PATCH
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[{"value":"25.1.2.3","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[{"value":"25.1.2.3","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -891,7 +887,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:50 GMT
+ - Thu, 20 May 2021 08:18:09 GMT
expires:
- '-1'
pragma:
@@ -907,7 +903,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1196'
+ - '1198'
status:
code: 200
message: OK
@@ -925,12 +921,12 @@ interactions:
ParameterSetName:
- -g --account-name --ip-address
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[{"value":"25.1.2.3","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[{"value":"25.1.2.3","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -939,7 +935,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:52 GMT
+ - Thu, 20 May 2021 08:18:10 GMT
expires:
- '-1'
pragma:
@@ -977,12 +973,12 @@ interactions:
ParameterSetName:
- -g --account-name --ip-address
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PATCH
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[{"value":"25.1.2.3","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[{"value":"25.1.2.3","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -991,7 +987,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:52 GMT
+ - Thu, 20 May 2021 08:18:10 GMT
expires:
- '-1'
pragma:
@@ -1007,7 +1003,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1197'
+ - '1199'
status:
code: 200
message: OK
@@ -1025,12 +1021,12 @@ interactions:
ParameterSetName:
- -g --account-name --ip-address
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[{"value":"25.1.2.3","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[{"value":"25.1.2.3","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -1039,7 +1035,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:52 GMT
+ - Thu, 20 May 2021 08:18:11 GMT
expires:
- '-1'
pragma:
@@ -1077,12 +1073,12 @@ interactions:
ParameterSetName:
- -g --account-name --ip-address
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PATCH
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[{"value":"25.1.2.3","action":"Allow"},{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[{"value":"25.1.2.3","action":"Allow"},{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -1091,7 +1087,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:54 GMT
+ - Thu, 20 May 2021 08:18:12 GMT
expires:
- '-1'
pragma:
@@ -1107,7 +1103,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1195'
+ - '1199'
status:
code: 200
message: OK
@@ -1125,12 +1121,12 @@ interactions:
ParameterSetName:
- -g --account-name --vnet-name --subnet
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[{"value":"25.1.2.3","action":"Allow"},{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[{"value":"25.1.2.3","action":"Allow"},{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -1139,7 +1135,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:55 GMT
+ - Thu, 20 May 2021 08:18:13 GMT
expires:
- '-1'
pragma:
@@ -1178,12 +1174,12 @@ interactions:
ParameterSetName:
- -g --account-name --vnet-name --subnet
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PATCH
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1","action":"Allow","state":"Succeeded"}],"ipRules":[{"value":"25.1.2.3","action":"Allow"},{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1","action":"Allow","state":"Succeeded"}],"ipRules":[{"value":"25.1.2.3","action":"Allow"},{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -1192,7 +1188,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:56 GMT
+ - Thu, 20 May 2021 08:18:15 GMT
expires:
- '-1'
pragma:
@@ -1208,7 +1204,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1196'
+ - '1198'
status:
code: 200
message: OK
@@ -1226,12 +1222,12 @@ interactions:
ParameterSetName:
- -g --account-name
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1","action":"Allow","state":"Succeeded"}],"ipRules":[{"value":"25.1.2.3","action":"Allow"},{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1","action":"Allow","state":"Succeeded"}],"ipRules":[{"value":"25.1.2.3","action":"Allow"},{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -1240,7 +1236,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:58 GMT
+ - Thu, 20 May 2021 08:18:16 GMT
expires:
- '-1'
pragma:
@@ -1272,12 +1268,12 @@ interactions:
ParameterSetName:
- -g --account-name --vnet-name --subnet
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1","action":"Allow","state":"Succeeded"}],"ipRules":[{"value":"25.1.2.3","action":"Allow"},{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1","action":"Allow","state":"Succeeded"}],"ipRules":[{"value":"25.1.2.3","action":"Allow"},{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -1286,7 +1282,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:59 GMT
+ - Thu, 20 May 2021 08:18:17 GMT
expires:
- '-1'
pragma:
@@ -1325,12 +1321,12 @@ interactions:
ParameterSetName:
- -g --account-name --vnet-name --subnet
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PATCH
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1","action":"Allow","state":"Succeeded"}],"ipRules":[{"value":"25.1.2.3","action":"Allow"},{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1","action":"Allow","state":"Succeeded"}],"ipRules":[{"value":"25.1.2.3","action":"Allow"},{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -1339,7 +1335,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:59:00 GMT
+ - Thu, 20 May 2021 08:18:17 GMT
expires:
- '-1'
pragma:
@@ -1355,7 +1351,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1195'
+ - '1199'
status:
code: 200
message: OK
@@ -1373,12 +1369,12 @@ interactions:
ParameterSetName:
- -g --account-name
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1","action":"Allow","state":"Succeeded"}],"ipRules":[{"value":"25.1.2.3","action":"Allow"},{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1","action":"Allow","state":"Succeeded"}],"ipRules":[{"value":"25.1.2.3","action":"Allow"},{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -1387,7 +1383,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:59:01 GMT
+ - Thu, 20 May 2021 08:18:20 GMT
expires:
- '-1'
pragma:
@@ -1419,12 +1415,12 @@ interactions:
ParameterSetName:
- -g --account-name --ip-address
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1","action":"Allow","state":"Succeeded"}],"ipRules":[{"value":"25.1.2.3","action":"Allow"},{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1","action":"Allow","state":"Succeeded"}],"ipRules":[{"value":"25.1.2.3","action":"Allow"},{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -1433,7 +1429,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:59:03 GMT
+ - Thu, 20 May 2021 08:18:20 GMT
expires:
- '-1'
pragma:
@@ -1472,12 +1468,12 @@ interactions:
ParameterSetName:
- -g --account-name --ip-address
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PATCH
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1","action":"Allow","state":"Succeeded"}],"ipRules":[{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1","action":"Allow","state":"Succeeded"}],"ipRules":[{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -1486,7 +1482,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:59:04 GMT
+ - Thu, 20 May 2021 08:18:23 GMT
expires:
- '-1'
pragma:
@@ -1520,12 +1516,12 @@ interactions:
ParameterSetName:
- -g --account-name --vnet-name --subnet
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1","action":"Allow","state":"Succeeded"}],"ipRules":[{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1","action":"Allow","state":"Succeeded"}],"ipRules":[{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -1534,7 +1530,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:59:05 GMT
+ - Thu, 20 May 2021 08:18:24 GMT
expires:
- '-1'
pragma:
@@ -1572,12 +1568,12 @@ interactions:
ParameterSetName:
- -g --account-name --vnet-name --subnet
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PATCH
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -1586,7 +1582,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:59:06 GMT
+ - Thu, 20 May 2021 08:18:25 GMT
expires:
- '-1'
pragma:
@@ -1602,7 +1598,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1196'
+ - '1199'
status:
code: 200
message: OK
@@ -1620,12 +1616,12 @@ interactions:
ParameterSetName:
- -g --account-name
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:58:06.5795218Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:58:06.4857581Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/cli000003","name":"cli000003","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"Logging","virtualNetworkRules":[],"ipRules":[{"value":"25.2.0.0/24","action":"Allow"}],"defaultAction":"Deny"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:17:18.6257026Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T08:17:18.5632132Z","primaryEndpoints":{"dfs":"https://cli000003.dfs.core.windows.net/","web":"https://cli000003.z22.web.core.windows.net/","blob":"https://cli000003.blob.core.windows.net/","queue":"https://cli000003.queue.core.windows.net/","table":"https://cli000003.table.core.windows.net/","file":"https://cli000003.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://cli000003-secondary.dfs.core.windows.net/","web":"https://cli000003-secondary.z22.web.core.windows.net/","blob":"https://cli000003-secondary.blob.core.windows.net/","queue":"https://cli000003-secondary.queue.core.windows.net/","table":"https://cli000003-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -1634,7 +1630,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:59:07 GMT
+ - Thu, 20 May 2021 08:18:26 GMT
expires:
- '-1'
pragma:
diff --git a/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_account_resource_access_rules.yaml b/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_account_resource_access_rules.yaml
index d0b359f7977..9934a1d153d 100644
--- a/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_account_resource_access_rules.yaml
+++ b/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_account_resource_access_rules.yaml
@@ -13,12 +13,12 @@ interactions:
ParameterSetName:
- -g --account-name --resource-id --tenant-id
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -27,7 +27,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:00 GMT
+ - Thu, 20 May 2021 08:17:14 GMT
expires:
- '-1'
pragma:
@@ -65,12 +65,12 @@ interactions:
ParameterSetName:
- -g --account-name --resource-id --tenant-id
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PATCH
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -79,7 +79,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:00 GMT
+ - Thu, 20 May 2021 08:17:15 GMT
expires:
- '-1'
pragma:
@@ -95,7 +95,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1194'
+ - '1199'
status:
code: 200
message: OK
@@ -113,12 +113,12 @@ interactions:
ParameterSetName:
- -g --account-name
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -127,7 +127,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:02 GMT
+ - Thu, 20 May 2021 08:17:16 GMT
expires:
- '-1'
pragma:
@@ -159,12 +159,12 @@ interactions:
ParameterSetName:
- -g --account-name --resource-id --tenant-id
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -173,7 +173,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:04 GMT
+ - Thu, 20 May 2021 08:17:17 GMT
expires:
- '-1'
pragma:
@@ -211,12 +211,12 @@ interactions:
ParameterSetName:
- -g --account-name --resource-id --tenant-id
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PATCH
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -225,7 +225,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:04 GMT
+ - Thu, 20 May 2021 08:17:18 GMT
expires:
- '-1'
pragma:
@@ -241,7 +241,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1195'
+ - '1199'
status:
code: 200
message: OK
@@ -259,12 +259,12 @@ interactions:
ParameterSetName:
- -g --account-name
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -273,7 +273,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:04 GMT
+ - Thu, 20 May 2021 08:17:19 GMT
expires:
- '-1'
pragma:
@@ -305,12 +305,12 @@ interactions:
ParameterSetName:
- -g --account-name --resource-id --tenant-id
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -319,7 +319,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:05 GMT
+ - Thu, 20 May 2021 08:17:20 GMT
expires:
- '-1'
pragma:
@@ -358,12 +358,12 @@ interactions:
ParameterSetName:
- -g --account-name --resource-id --tenant-id
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PATCH
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -372,7 +372,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:06 GMT
+ - Thu, 20 May 2021 08:17:21 GMT
expires:
- '-1'
pragma:
@@ -388,7 +388,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1199'
+ - '1198'
status:
code: 200
message: OK
@@ -406,12 +406,12 @@ interactions:
ParameterSetName:
- -g --account-name
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -420,7 +420,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:07 GMT
+ - Thu, 20 May 2021 08:17:23 GMT
expires:
- '-1'
pragma:
@@ -452,12 +452,12 @@ interactions:
ParameterSetName:
- -g --account-name --resource-id --tenant-id
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -466,7 +466,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:08 GMT
+ - Thu, 20 May 2021 08:17:24 GMT
expires:
- '-1'
pragma:
@@ -506,12 +506,12 @@ interactions:
ParameterSetName:
- -g --account-name --resource-id --tenant-id
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PATCH
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -520,7 +520,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:09 GMT
+ - Thu, 20 May 2021 08:17:25 GMT
expires:
- '-1'
pragma:
@@ -536,7 +536,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1197'
+ - '1199'
status:
code: 200
message: OK
@@ -554,12 +554,12 @@ interactions:
ParameterSetName:
- -g --account-name
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -568,7 +568,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:10 GMT
+ - Thu, 20 May 2021 08:17:26 GMT
expires:
- '-1'
pragma:
@@ -600,12 +600,12 @@ interactions:
ParameterSetName:
- -g --account-name --resource-id --tenant-id
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace1"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -614,7 +614,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:10 GMT
+ - Thu, 20 May 2021 08:17:26 GMT
expires:
- '-1'
pragma:
@@ -653,12 +653,12 @@ interactions:
ParameterSetName:
- -g --account-name --resource-id --tenant-id
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PATCH
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -667,7 +667,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:11 GMT
+ - Thu, 20 May 2021 08:17:26 GMT
expires:
- '-1'
pragma:
@@ -683,7 +683,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1193'
+ - '1199'
status:
code: 200
message: OK
@@ -701,12 +701,12 @@ interactions:
ParameterSetName:
- -g --account-name
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -715,7 +715,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:12 GMT
+ - Thu, 20 May 2021 08:17:28 GMT
expires:
- '-1'
pragma:
@@ -747,12 +747,12 @@ interactions:
ParameterSetName:
- -g --account-name --resource-id --tenant-id
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace2"},{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -761,7 +761,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:14 GMT
+ - Thu, 20 May 2021 08:17:29 GMT
expires:
- '-1'
pragma:
@@ -799,12 +799,12 @@ interactions:
ParameterSetName:
- -g --account-name --resource-id --tenant-id
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PATCH
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -813,7 +813,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:14 GMT
+ - Thu, 20 May 2021 08:17:30 GMT
expires:
- '-1'
pragma:
@@ -847,12 +847,12 @@ interactions:
ParameterSetName:
- -g --account-name
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/16.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002?api-version=2020-08-01-preview
response:
body:
- string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:57:39.3448564Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-04-28T08:57:39.2510451Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
+ string: '{"sku":{"name":"Standard_LRS","tier":"Standard"},"kind":"Storage","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_storage_service_endpoints000001/providers/Microsoft.Storage/storageAccounts/clitest000002","name":"clitest000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"privateEndpointConnections":[],"networkAcls":{"resourceAccessRules":[{"tenantId":"72f988bf-86f1-41af-91ab-2d7cd011db47","resourceId":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/res9407/providers/Microsoft.Synapse/workspaces/testworkspace3"}],"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T08:16:53.4692259Z"}},"keySource":"Microsoft.Storage"},"provisioningState":"Succeeded","creationTime":"2021-05-20T08:16:53.3911452Z","primaryEndpoints":{"blob":"https://clitest000002.blob.core.windows.net/","queue":"https://clitest000002.queue.core.windows.net/","table":"https://clitest000002.table.core.windows.net/","file":"https://clitest000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available"}}'
headers:
cache-control:
- no-cache
@@ -861,7 +861,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:58:15 GMT
+ - Thu, 20 May 2021 08:17:32 GMT
expires:
- '-1'
pragma:
diff --git a/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_adls_blob.yaml b/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_adls_blob.yaml
index 51a39eab7a5..37c81f783fd 100644
--- a/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_adls_blob.yaml
+++ b/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_adls_blob.yaml
@@ -13,15 +13,12 @@ interactions:
ParameterSetName:
- -n -g --kind --hierarchical-namespace --https-only
User-Agent:
- - python/3.7.7 (Windows-10-10.0.19041-SP0) msrest/0.6.21 msrest_azure/0.6.3
- azure-mgmt-resource/12.1.0 Azure-SDK-For-Python AZURECLI/2.22.1
- accept-language:
- - en-US
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-resource/16.1.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/clitest.rg000001?api-version=2020-10-01
response:
body:
- string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001","name":"clitest.rg000001","type":"Microsoft.Resources/resourceGroups","location":"westus","tags":{"product":"azurecli","cause":"automation","date":"2021-04-28T08:46:04Z"},"properties":{"provisioningState":"Succeeded"}}'
+ string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001","name":"clitest.rg000001","type":"Microsoft.Resources/resourceGroups","location":"westus","tags":{"product":"azurecli","cause":"automation","date":"2021-05-20T06:23:49Z"},"properties":{"provisioningState":"Succeeded"}}'
headers:
cache-control:
- no-cache
@@ -30,7 +27,7 @@ interactions:
content-type:
- application/json; charset=utf-8
date:
- - Wed, 28 Apr 2021 08:46:08 GMT
+ - Thu, 20 May 2021 06:23:54 GMT
expires:
- '-1'
pragma:
@@ -64,9 +61,9 @@ interactions:
ParameterSetName:
- -n -g --kind --hierarchical-namespace --https-only
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/17.1.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PUT
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/clitestaldsaccount000002?api-version=2021-02-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/clitestaldsaccount000002?api-version=2021-04-01
response:
body:
string: ''
@@ -78,11 +75,11 @@ interactions:
content-type:
- text/plain; charset=utf-8
date:
- - Wed, 28 Apr 2021 08:46:15 GMT
+ - Thu, 20 May 2021 06:24:00 GMT
expires:
- '-1'
location:
- - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Storage/locations/westus/asyncoperations/08812eed-30e7-44b2-ab50-9ccd0b2c6259?monitor=true&api-version=2021-02-01
+ - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Storage/locations/westus/asyncoperations/b266b4d6-8b2a-490c-8539-f62f18252b99?monitor=true&api-version=2021-04-01
pragma:
- no-cache
server:
@@ -92,7 +89,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1196'
+ - '1198'
status:
code: 202
message: Accepted
@@ -110,12 +107,12 @@ interactions:
ParameterSetName:
- -n -g --kind --hierarchical-namespace --https-only
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/17.1.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Storage/locations/westus/asyncoperations/08812eed-30e7-44b2-ab50-9ccd0b2c6259?monitor=true&api-version=2021-02-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Storage/locations/westus/asyncoperations/b266b4d6-8b2a-490c-8539-f62f18252b99?monitor=true&api-version=2021-04-01
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/clitestaldsaccount000002","name":"clitestaldsaccount000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"keyCreationTime":{"key1":"2021-04-28T08:46:13.4098354Z","key2":"2021-04-28T08:46:13.4098354Z"},"privateEndpointConnections":[],"isHnsEnabled":true,"networkAcls":{"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:46:13.4098354Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:46:13.4098354Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:46:13.3160854Z","primaryEndpoints":{"dfs":"https://clitestaldsaccount000002.dfs.core.windows.net/","web":"https://clitestaldsaccount000002.z22.web.core.windows.net/","blob":"https://clitestaldsaccount000002.blob.core.windows.net/","queue":"https://clitestaldsaccount000002.queue.core.windows.net/","table":"https://clitestaldsaccount000002.table.core.windows.net/","file":"https://clitestaldsaccount000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://clitestaldsaccount000002-secondary.dfs.core.windows.net/","web":"https://clitestaldsaccount000002-secondary.z22.web.core.windows.net/","blob":"https://clitestaldsaccount000002-secondary.blob.core.windows.net/","queue":"https://clitestaldsaccount000002-secondary.queue.core.windows.net/","table":"https://clitestaldsaccount000002-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/clitestaldsaccount000002","name":"clitestaldsaccount000002","type":"Microsoft.Storage/storageAccounts","location":"westus","tags":{},"properties":{"keyCreationTime":{"key1":"2021-05-20T06:23:59.2005008Z","key2":"2021-05-20T06:23:59.2005008Z"},"privateEndpointConnections":[],"isHnsEnabled":true,"networkAcls":{"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T06:23:59.2005008Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T06:23:59.2005008Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T06:23:59.1067665Z","primaryEndpoints":{"dfs":"https://clitestaldsaccount000002.dfs.core.windows.net/","web":"https://clitestaldsaccount000002.z22.web.core.windows.net/","blob":"https://clitestaldsaccount000002.blob.core.windows.net/","queue":"https://clitestaldsaccount000002.queue.core.windows.net/","table":"https://clitestaldsaccount000002.table.core.windows.net/","file":"https://clitestaldsaccount000002.file.core.windows.net/"},"primaryLocation":"westus","statusOfPrimary":"available","secondaryLocation":"eastus","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://clitestaldsaccount000002-secondary.dfs.core.windows.net/","web":"https://clitestaldsaccount000002-secondary.z22.web.core.windows.net/","blob":"https://clitestaldsaccount000002-secondary.blob.core.windows.net/","queue":"https://clitestaldsaccount000002-secondary.queue.core.windows.net/","table":"https://clitestaldsaccount000002-secondary.table.core.windows.net/"}}}'
headers:
cache-control:
- no-cache
@@ -124,7 +121,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:46:32 GMT
+ - Thu, 20 May 2021 06:24:17 GMT
expires:
- '-1'
pragma:
@@ -158,12 +155,12 @@ interactions:
ParameterSetName:
- -n -g --query -o
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/17.1.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: POST
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/clitestaldsaccount000002/listKeys?api-version=2021-02-01&$expand=kerb
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/clitestaldsaccount000002/listKeys?api-version=2021-04-01&$expand=kerb
response:
body:
- string: '{"keys":[{"creationTime":"2021-04-28T08:46:13.4098354Z","keyName":"key1","value":"veryFakedStorageAccountKey==","permissions":"FULL"},{"creationTime":"2021-04-28T08:46:13.4098354Z","keyName":"key2","value":"veryFakedStorageAccountKey==","permissions":"FULL"}]}'
+ string: '{"keys":[{"creationTime":"2021-05-20T06:23:59.2005008Z","keyName":"key1","value":"veryFakedStorageAccountKey==","permissions":"FULL"},{"creationTime":"2021-05-20T06:23:59.2005008Z","keyName":"key2","value":"veryFakedStorageAccountKey==","permissions":"FULL"}]}'
headers:
cache-control:
- no-cache
@@ -172,7 +169,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:46:33 GMT
+ - Thu, 20 May 2021 06:24:18 GMT
expires:
- '-1'
pragma:
@@ -200,9 +197,9 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:34 GMT
+ - Thu, 20 May 2021 06:24:19 GMT
x-ms-version:
- '2018-11-09'
method: PUT
@@ -214,11 +211,11 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:35 GMT
+ - Thu, 20 May 2021 06:24:21 GMT
etag:
- - '"0x8D90A22216F5300"'
+ - '"0x8D91B57E8140429"'
last-modified:
- - Wed, 28 Apr 2021 08:46:35 GMT
+ - Thu, 20 May 2021 06:24:21 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-version:
@@ -232,9 +229,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:35 GMT
+ - Thu, 20 May 2021 06:24:22 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -244,7 +241,7 @@ interactions:
string: ''
headers:
date:
- - Wed, 28 Apr 2021 08:46:36 GMT
+ - Thu, 20 May 2021 06:24:22 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
@@ -262,9 +259,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:37 GMT
+ - Thu, 20 May 2021 06:24:23 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -274,7 +271,7 @@ interactions:
string: ''
headers:
date:
- - Wed, 28 Apr 2021 08:46:37 GMT
+ - Thu, 20 May 2021 06:24:24 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
@@ -294,9 +291,9 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:38 GMT
+ - Thu, 20 May 2021 06:24:24 GMT
x-ms-umask:
- '0027'
x-ms-version:
@@ -310,11 +307,11 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:24:25 GMT
etag:
- - '"0x8D90A222396D39A"'
+ - '"0x8D91B57EA7339B4"'
last-modified:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:24:25 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
x-ms-request-server-encrypted:
@@ -330,9 +327,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:24:26 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -348,11 +345,11 @@ interactions:
content-type:
- application/octet-stream
date:
- - Wed, 28 Apr 2021 08:46:40 GMT
+ - Thu, 20 May 2021 06:24:26 GMT
etag:
- - '"0x8D90A222396D39A"'
+ - '"0x8D91B57EA7339B4"'
last-modified:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:24:25 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-access-tier:
@@ -362,7 +359,7 @@ interactions:
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:24:25 GMT
x-ms-lease-state:
- available
x-ms-lease-status:
@@ -390,24 +387,24 @@ interactions:
ParameterSetName:
- -f --account-name --account-key
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-storage-dfs/12.3.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-storage-dfs/12.3.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
x-ms-date:
- - Wed, 28 Apr 2021 08:46:40 GMT
+ - Thu, 20 May 2021 06:24:27 GMT
x-ms-version:
- '2020-02-10'
method: GET
uri: https://clitestaldsaccount000002.dfs.core.windows.net/cont000003?resource=filesystem&recursive=true&maxResults=5000
response:
body:
- string: '{"paths":[{"contentLength":"0","creationTime":"132640731991888794","etag":"0x8D90A222396D39A","group":"$superuser","isDirectory":"true","lastModified":"Wed,
- 28 Apr 2021 08:46:39 GMT","name":"testdirectory","owner":"$superuser","permissions":"rwxr-x---"}]}
+ string: '{"paths":[{"contentLength":"0","creationTime":"132659654659160500","etag":"0x8D91B57EA7339B4","group":"$superuser","isDirectory":"true","lastModified":"Thu,
+ 20 May 2021 06:24:25 GMT","name":"testdirectory","owner":"$superuser","permissions":"rwxr-x---"}]}
'
headers:
content-type:
- application/json;charset=utf-8
date:
- - Wed, 28 Apr 2021 08:46:40 GMT
+ - Thu, 20 May 2021 06:24:27 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
@@ -423,9 +420,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:41 GMT
+ - Thu, 20 May 2021 06:24:28 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -441,11 +438,11 @@ interactions:
content-type:
- application/octet-stream
date:
- - Wed, 28 Apr 2021 08:46:41 GMT
+ - Thu, 20 May 2021 06:24:29 GMT
etag:
- - '"0x8D90A222396D39A"'
+ - '"0x8D91B57EA7339B4"'
last-modified:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:24:25 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-access-tier:
@@ -455,7 +452,7 @@ interactions:
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:24:25 GMT
x-ms-lease-state:
- available
x-ms-lease-status:
@@ -475,9 +472,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:42 GMT
+ - Thu, 20 May 2021 06:24:29 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -487,11 +484,11 @@ interactions:
string: ''
headers:
date:
- - Wed, 28 Apr 2021 08:46:42 GMT
+ - Thu, 20 May 2021 06:24:30 GMT
etag:
- - '"0x8D90A222396D39A"'
+ - '"0x8D91B57EA7339B4"'
last-modified:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:24:25 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
x-ms-acl:
@@ -513,9 +510,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:43 GMT
+ - Thu, 20 May 2021 06:24:31 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -531,11 +528,11 @@ interactions:
content-type:
- application/octet-stream
date:
- - Wed, 28 Apr 2021 08:46:44 GMT
+ - Thu, 20 May 2021 06:24:31 GMT
etag:
- - '"0x8D90A222396D39A"'
+ - '"0x8D91B57EA7339B4"'
last-modified:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:24:25 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-access-tier:
@@ -545,7 +542,7 @@ interactions:
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:24:25 GMT
x-ms-lease-state:
- available
x-ms-lease-status:
@@ -565,9 +562,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:45 GMT
+ - Thu, 20 May 2021 06:24:32 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -577,7 +574,7 @@ interactions:
string: ''
headers:
date:
- - Wed, 28 Apr 2021 08:46:45 GMT
+ - Thu, 20 May 2021 06:24:32 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
@@ -597,9 +594,9 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:46 GMT
+ - Thu, 20 May 2021 06:24:33 GMT
x-ms-permissions:
- rwxrwxrwx
x-ms-umask:
@@ -615,11 +612,11 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:46 GMT
+ - Thu, 20 May 2021 06:24:33 GMT
etag:
- - '"0x8D90A22284ADA05"'
+ - '"0x8D91B57EF88CE1A"'
last-modified:
- - Wed, 28 Apr 2021 08:46:47 GMT
+ - Thu, 20 May 2021 06:24:34 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
x-ms-request-server-encrypted:
@@ -635,9 +632,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:47 GMT
+ - Thu, 20 May 2021 06:24:34 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -653,11 +650,11 @@ interactions:
content-type:
- application/octet-stream
date:
- - Wed, 28 Apr 2021 08:46:47 GMT
+ - Thu, 20 May 2021 06:24:35 GMT
etag:
- - '"0x8D90A22284ADA05"'
+ - '"0x8D91B57EF88CE1A"'
last-modified:
- - Wed, 28 Apr 2021 08:46:47 GMT
+ - Thu, 20 May 2021 06:24:34 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-access-tier:
@@ -667,7 +664,7 @@ interactions:
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- - Wed, 28 Apr 2021 08:46:47 GMT
+ - Thu, 20 May 2021 06:24:34 GMT
x-ms-lease-state:
- available
x-ms-lease-status:
@@ -687,9 +684,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:48 GMT
+ - Thu, 20 May 2021 06:24:36 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -699,11 +696,11 @@ interactions:
string: ''
headers:
date:
- - Wed, 28 Apr 2021 08:46:49 GMT
+ - Thu, 20 May 2021 06:24:36 GMT
etag:
- - '"0x8D90A22284ADA05"'
+ - '"0x8D91B57EF88CE1A"'
last-modified:
- - Wed, 28 Apr 2021 08:46:47 GMT
+ - Thu, 20 May 2021 06:24:34 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
x-ms-acl:
@@ -727,11 +724,11 @@ interactions:
Content-Length:
- '131072'
User-Agent:
- - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-blob-type:
- BlockBlob
x-ms-date:
- - Wed, 28 Apr 2021 08:46:49 GMT
+ - Thu, 20 May 2021 06:24:37 GMT
x-ms-version:
- '2018-11-09'
method: PUT
@@ -745,11 +742,11 @@ interactions:
content-md5:
- DfvoqkwgtS4bi/PLbL3xkw==
date:
- - Wed, 28 Apr 2021 08:46:50 GMT
+ - Thu, 20 May 2021 06:24:39 GMT
etag:
- - '"0x8D90A222AE86200"'
+ - '"0x8D91B57F27329ED"'
last-modified:
- - Wed, 28 Apr 2021 08:46:51 GMT
+ - Thu, 20 May 2021 06:24:39 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-request-server-encrypted:
@@ -767,11 +764,11 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-acl:
- user::rwx,group::r--,other::---
x-ms-date:
- - Wed, 28 Apr 2021 08:46:51 GMT
+ - Thu, 20 May 2021 06:24:39 GMT
x-ms-version:
- '2019-02-02'
method: PATCH
@@ -783,11 +780,11 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:52 GMT
+ - Thu, 20 May 2021 06:24:40 GMT
etag:
- - '"0x8D90A222AE86200"'
+ - '"0x8D91B57F27329ED"'
last-modified:
- - Wed, 28 Apr 2021 08:46:51 GMT
+ - Thu, 20 May 2021 06:24:39 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
x-ms-namespace-enabled:
@@ -803,9 +800,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:52 GMT
+ - Thu, 20 May 2021 06:24:40 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -815,11 +812,11 @@ interactions:
string: ''
headers:
date:
- - Wed, 28 Apr 2021 08:46:53 GMT
+ - Thu, 20 May 2021 06:24:40 GMT
etag:
- - '"0x8D90A222AE86200"'
+ - '"0x8D91B57F27329ED"'
last-modified:
- - Wed, 28 Apr 2021 08:46:51 GMT
+ - Thu, 20 May 2021 06:24:39 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
x-ms-acl:
@@ -843,9 +840,9 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:53 GMT
+ - Thu, 20 May 2021 06:24:42 GMT
x-ms-permissions:
- rwxrwxrwx
x-ms-version:
@@ -859,11 +856,11 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:53 GMT
+ - Thu, 20 May 2021 06:24:42 GMT
etag:
- - '"0x8D90A222AE86200"'
+ - '"0x8D91B57F27329ED"'
last-modified:
- - Wed, 28 Apr 2021 08:46:51 GMT
+ - Thu, 20 May 2021 06:24:39 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
x-ms-namespace-enabled:
@@ -879,9 +876,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:55 GMT
+ - Thu, 20 May 2021 06:24:43 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -891,11 +888,11 @@ interactions:
string: ''
headers:
date:
- - Wed, 28 Apr 2021 08:46:55 GMT
+ - Thu, 20 May 2021 06:24:44 GMT
etag:
- - '"0x8D90A222AE86200"'
+ - '"0x8D91B57F27329ED"'
last-modified:
- - Wed, 28 Apr 2021 08:46:51 GMT
+ - Thu, 20 May 2021 06:24:39 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
x-ms-acl:
@@ -919,11 +916,11 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-acl:
- user::rwx,group::r--,other::---
x-ms-date:
- - Wed, 28 Apr 2021 08:46:56 GMT
+ - Thu, 20 May 2021 06:24:44 GMT
x-ms-version:
- '2019-02-02'
method: PATCH
@@ -935,11 +932,11 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:57 GMT
+ - Thu, 20 May 2021 06:24:44 GMT
etag:
- - '"0x8D90A222396D39A"'
+ - '"0x8D91B57EA7339B4"'
last-modified:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:24:25 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
x-ms-namespace-enabled:
@@ -955,9 +952,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:57 GMT
+ - Thu, 20 May 2021 06:24:45 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -967,11 +964,11 @@ interactions:
string: ''
headers:
date:
- - Wed, 28 Apr 2021 08:46:58 GMT
+ - Thu, 20 May 2021 06:24:46 GMT
etag:
- - '"0x8D90A222396D39A"'
+ - '"0x8D91B57EA7339B4"'
last-modified:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:24:25 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
x-ms-acl:
@@ -995,9 +992,9 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:58 GMT
+ - Thu, 20 May 2021 06:24:47 GMT
x-ms-permissions:
- rwxrwxrwx
x-ms-version:
@@ -1011,11 +1008,11 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:58 GMT
+ - Thu, 20 May 2021 06:24:48 GMT
etag:
- - '"0x8D90A222396D39A"'
+ - '"0x8D91B57EA7339B4"'
last-modified:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:24:25 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
x-ms-namespace-enabled:
@@ -1031,9 +1028,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:59 GMT
+ - Thu, 20 May 2021 06:24:48 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -1043,11 +1040,11 @@ interactions:
string: ''
headers:
date:
- - Wed, 28 Apr 2021 08:47:00 GMT
+ - Thu, 20 May 2021 06:24:49 GMT
etag:
- - '"0x8D90A222396D39A"'
+ - '"0x8D91B57EA7339B4"'
last-modified:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:24:25 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
x-ms-acl:
@@ -1071,9 +1068,9 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:47:01 GMT
+ - Thu, 20 May 2021 06:24:49 GMT
x-ms-meta-tag1:
- value1
x-ms-version:
@@ -1087,11 +1084,11 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:47:01 GMT
+ - Thu, 20 May 2021 06:24:50 GMT
etag:
- - '"0x8D90A223134AEE8"'
+ - '"0x8D91B57F9460528"'
last-modified:
- - Wed, 28 Apr 2021 08:47:02 GMT
+ - Thu, 20 May 2021 06:24:50 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-request-server-encrypted:
@@ -1107,9 +1104,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:47:02 GMT
+ - Thu, 20 May 2021 06:24:51 GMT
x-ms-version:
- '2019-02-02'
method: GET
@@ -1121,11 +1118,11 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:47:02 GMT
+ - Thu, 20 May 2021 06:24:52 GMT
etag:
- - '"0x8D90A223134AEE8"'
+ - '"0x8D91B57F9460528"'
last-modified:
- - Wed, 28 Apr 2021 08:47:02 GMT
+ - Thu, 20 May 2021 06:24:50 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-meta-hdi_isfolder:
@@ -1145,9 +1142,9 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:47:03 GMT
+ - Thu, 20 May 2021 06:24:52 GMT
x-ms-version:
- '2019-02-02'
method: DELETE
@@ -1159,7 +1156,7 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:47:03 GMT
+ - Thu, 20 May 2021 06:24:52 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
x-ms-version:
@@ -1173,9 +1170,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:47:04 GMT
+ - Thu, 20 May 2021 06:24:53 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -1185,7 +1182,7 @@ interactions:
string: ''
headers:
date:
- - Wed, 28 Apr 2021 08:47:04 GMT
+ - Thu, 20 May 2021 06:24:54 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
diff --git a/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_adls_blob_move.yaml b/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_adls_blob_move.yaml
index 64249227e55..57dde0a29dd 100644
--- a/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_adls_blob_move.yaml
+++ b/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_adls_blob_move.yaml
@@ -19,9 +19,9 @@ interactions:
ParameterSetName:
- -n -g --kind --hierarchical-namespace -l --https-only
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/17.1.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: PUT
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/clitestaldsaccount000002?api-version=2021-02-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/clitestaldsaccount000002?api-version=2021-04-01
response:
body:
string: ''
@@ -33,11 +33,11 @@ interactions:
content-type:
- text/plain; charset=utf-8
date:
- - Wed, 28 Apr 2021 08:46:19 GMT
+ - Thu, 20 May 2021 06:24:02 GMT
expires:
- '-1'
location:
- - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Storage/locations/centralus/asyncoperations/f4e23635-de3a-4d53-b74c-efba17f457fc?monitor=true&api-version=2021-02-01
+ - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Storage/locations/centralus/asyncoperations/c622c69b-e7fc-41a2-811d-efa23125de2b?monitor=true&api-version=2021-04-01
pragma:
- no-cache
server:
@@ -47,7 +47,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-writes:
- - '1197'
+ - '1198'
status:
code: 202
message: Accepted
@@ -65,12 +65,12 @@ interactions:
ParameterSetName:
- -n -g --kind --hierarchical-namespace -l --https-only
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/17.1.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: GET
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Storage/locations/centralus/asyncoperations/f4e23635-de3a-4d53-b74c-efba17f457fc?monitor=true&api-version=2021-02-01
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Storage/locations/centralus/asyncoperations/c622c69b-e7fc-41a2-811d-efa23125de2b?monitor=true&api-version=2021-04-01
response:
body:
- string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/clitestaldsaccount000002","name":"clitestaldsaccount000002","type":"Microsoft.Storage/storageAccounts","location":"centralus","tags":{},"properties":{"keyCreationTime":{"key1":"2021-04-28T08:46:16.7788283Z","key2":"2021-04-28T08:46:16.7788283Z"},"privateEndpointConnections":[],"isHnsEnabled":true,"networkAcls":{"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:46:16.7788283Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-04-28T08:46:16.7788283Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-04-28T08:46:16.6850246Z","primaryEndpoints":{"dfs":"https://clitestaldsaccount000002.dfs.core.windows.net/","web":"https://clitestaldsaccount000002.z19.web.core.windows.net/","blob":"https://clitestaldsaccount000002.blob.core.windows.net/","queue":"https://clitestaldsaccount000002.queue.core.windows.net/","table":"https://clitestaldsaccount000002.table.core.windows.net/","file":"https://clitestaldsaccount000002.file.core.windows.net/"},"primaryLocation":"centralus","statusOfPrimary":"available","secondaryLocation":"eastus2","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://clitestaldsaccount000002-secondary.dfs.core.windows.net/","web":"https://clitestaldsaccount000002-secondary.z19.web.core.windows.net/","blob":"https://clitestaldsaccount000002-secondary.blob.core.windows.net/","queue":"https://clitestaldsaccount000002-secondary.queue.core.windows.net/","table":"https://clitestaldsaccount000002-secondary.table.core.windows.net/"}}}'
+ string: '{"sku":{"name":"Standard_RAGRS","tier":"Standard"},"kind":"StorageV2","id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/clitestaldsaccount000002","name":"clitestaldsaccount000002","type":"Microsoft.Storage/storageAccounts","location":"centralus","tags":{},"properties":{"privateEndpointConnections":[],"isHnsEnabled":true,"networkAcls":{"bypass":"AzureServices","virtualNetworkRules":[],"ipRules":[],"defaultAction":"Allow"},"supportsHttpsTrafficOnly":true,"encryption":{"services":{"file":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T06:24:01.0112417Z"},"blob":{"keyType":"Account","enabled":true,"lastEnabledTime":"2021-05-20T06:24:01.0112417Z"}},"keySource":"Microsoft.Storage"},"accessTier":"Hot","provisioningState":"Succeeded","creationTime":"2021-05-20T06:24:00.9174536Z","primaryEndpoints":{"dfs":"https://clitestaldsaccount000002.dfs.core.windows.net/","web":"https://clitestaldsaccount000002.z19.web.core.windows.net/","blob":"https://clitestaldsaccount000002.blob.core.windows.net/","queue":"https://clitestaldsaccount000002.queue.core.windows.net/","table":"https://clitestaldsaccount000002.table.core.windows.net/","file":"https://clitestaldsaccount000002.file.core.windows.net/"},"primaryLocation":"centralus","statusOfPrimary":"available","secondaryLocation":"eastus2","statusOfSecondary":"available","secondaryEndpoints":{"dfs":"https://clitestaldsaccount000002-secondary.dfs.core.windows.net/","web":"https://clitestaldsaccount000002-secondary.z19.web.core.windows.net/","blob":"https://clitestaldsaccount000002-secondary.blob.core.windows.net/","queue":"https://clitestaldsaccount000002-secondary.queue.core.windows.net/","table":"https://clitestaldsaccount000002-secondary.table.core.windows.net/"},"keyCreationTime":{"key1":"2021-05-20T06:24:00.9956054Z","key2":"2021-05-20T06:24:00.9956054Z"}}}'
headers:
cache-control:
- no-cache
@@ -79,7 +79,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:46:36 GMT
+ - Thu, 20 May 2021 06:24:20 GMT
expires:
- '-1'
pragma:
@@ -113,12 +113,12 @@ interactions:
ParameterSetName:
- -n -g --query -o
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/17.1.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: POST
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/clitestaldsaccount000002/listKeys?api-version=2021-02-01&$expand=kerb
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/clitestaldsaccount000002/listKeys?api-version=2021-04-01&$expand=kerb
response:
body:
- string: '{"keys":[{"creationTime":"2021-04-28T08:46:16.7788283Z","keyName":"key1","value":"veryFakedStorageAccountKey==","permissions":"FULL"},{"creationTime":"2021-04-28T08:46:16.7788283Z","keyName":"key2","value":"veryFakedStorageAccountKey==","permissions":"FULL"}]}'
+ string: '{"keys":[{"creationTime":"2021-05-20T06:24:00.9956054Z","keyName":"key1","value":"veryFakedStorageAccountKey==","permissions":"FULL"},{"creationTime":"2021-05-20T06:24:00.9956054Z","keyName":"key2","value":"veryFakedStorageAccountKey==","permissions":"FULL"}]}'
headers:
cache-control:
- no-cache
@@ -127,7 +127,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:46:38 GMT
+ - Thu, 20 May 2021 06:24:22 GMT
expires:
- '-1'
pragma:
@@ -143,7 +143,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-resource-requests:
- - '11999'
+ - '11998'
status:
code: 200
message: OK
@@ -155,9 +155,9 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:38 GMT
+ - Thu, 20 May 2021 06:24:22 GMT
x-ms-version:
- '2018-11-09'
method: PUT
@@ -169,11 +169,11 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:24:24 GMT
etag:
- - '"0x8D90A222428493A"'
+ - '"0x8D91B57EA03837E"'
last-modified:
- - Wed, 28 Apr 2021 08:46:40 GMT
+ - Thu, 20 May 2021 06:24:25 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-version:
@@ -187,9 +187,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:40 GMT
+ - Thu, 20 May 2021 06:24:25 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -199,7 +199,7 @@ interactions:
string: ''
headers:
date:
- - Wed, 28 Apr 2021 08:46:40 GMT
+ - Thu, 20 May 2021 06:24:26 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
@@ -219,9 +219,9 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:41 GMT
+ - Thu, 20 May 2021 06:24:26 GMT
x-ms-umask:
- '0027'
x-ms-version:
@@ -235,13 +235,15 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:42 GMT
+ - Thu, 20 May 2021 06:24:27 GMT
etag:
- - '"0x8D90A2225BE70DD"'
+ - '"0x8D91B57EBA82C82"'
last-modified:
- - Wed, 28 Apr 2021 08:46:42 GMT
+ - Thu, 20 May 2021 06:24:27 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
+ x-ms-request-server-encrypted:
+ - 'true'
x-ms-version:
- '2019-02-02'
status:
@@ -255,11 +257,11 @@ interactions:
Content-Length:
- '131072'
User-Agent:
- - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-blob-type:
- BlockBlob
x-ms-date:
- - Wed, 28 Apr 2021 08:46:43 GMT
+ - Thu, 20 May 2021 06:24:28 GMT
x-ms-version:
- '2018-11-09'
method: PUT
@@ -273,11 +275,11 @@ interactions:
content-md5:
- DfvoqkwgtS4bi/PLbL3xkw==
date:
- - Wed, 28 Apr 2021 08:46:44 GMT
+ - Thu, 20 May 2021 06:24:29 GMT
etag:
- - '"0x8D90A222715C34F"'
+ - '"0x8D91B57ED1A9054"'
last-modified:
- - Wed, 28 Apr 2021 08:46:45 GMT
+ - Thu, 20 May 2021 06:24:30 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-request-server-encrypted:
@@ -293,9 +295,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:45 GMT
+ - Thu, 20 May 2021 06:24:30 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -305,7 +307,7 @@ interactions:
string: ''
headers:
date:
- - Wed, 28 Apr 2021 08:46:45 GMT
+ - Thu, 20 May 2021 06:24:31 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
@@ -325,9 +327,9 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:46 GMT
+ - Thu, 20 May 2021 06:24:31 GMT
x-ms-umask:
- '0027'
x-ms-version:
@@ -341,13 +343,15 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:47 GMT
+ - Thu, 20 May 2021 06:24:32 GMT
etag:
- - '"0x8D90A22287CCE04"'
+ - '"0x8D91B57EE9225ED"'
last-modified:
- - Wed, 28 Apr 2021 08:46:47 GMT
+ - Thu, 20 May 2021 06:24:32 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
+ x-ms-request-server-encrypted:
+ - 'true'
x-ms-version:
- '2019-02-02'
status:
@@ -359,9 +363,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:47 GMT
+ - Thu, 20 May 2021 06:24:33 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -379,11 +383,11 @@ interactions:
content-type:
- application/octet-stream
date:
- - Wed, 28 Apr 2021 08:46:48 GMT
+ - Thu, 20 May 2021 06:24:33 GMT
etag:
- - '"0x8D90A222715C34F"'
+ - '"0x8D91B57ED1A9054"'
last-modified:
- - Wed, 28 Apr 2021 08:46:45 GMT
+ - Thu, 20 May 2021 06:24:30 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-access-tier:
@@ -393,7 +397,7 @@ interactions:
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- - Wed, 28 Apr 2021 08:46:45 GMT
+ - Thu, 20 May 2021 06:24:30 GMT
x-ms-lease-state:
- available
x-ms-lease-status:
@@ -411,9 +415,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:48 GMT
+ - Thu, 20 May 2021 06:24:34 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -423,7 +427,7 @@ interactions:
string: ''
headers:
date:
- - Wed, 28 Apr 2021 08:46:48 GMT
+ - Thu, 20 May 2021 06:24:33 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
@@ -443,11 +447,11 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:48 GMT
+ - Thu, 20 May 2021 06:24:34 GMT
x-ms-rename-source:
- - /cont4oivmqb5nxugfeletfpl/dir/blobwa4fgaqx374bnddsjxfw
+ - /conticd2oqeulfvoudkwkibh/dir/blobh6acri32tk2mpjjauriu
x-ms-version:
- '2019-02-02'
method: PUT
@@ -459,7 +463,7 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:49 GMT
+ - Thu, 20 May 2021 06:24:34 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
x-ms-version:
@@ -473,9 +477,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:50 GMT
+ - Thu, 20 May 2021 06:24:35 GMT
x-ms-version:
- '2019-02-02'
method: GET
@@ -484,9 +488,9 @@ interactions:
body:
string: "\uFEFFdir1/5000dir1/blob000004Wed,
- 28 Apr 2021 08:46:45 GMTWed, 28 Apr 2021 08:46:45
- GMT0x8D90A222715C34F131072application/octet-streamdir1/5000dir1/blob000004Thu,
+ 20 May 2021 06:24:30 GMTThu, 20 May 2021 06:24:30
+ GMT0x8D91B57ED1A9054131072application/octet-streamDfvoqkwgtS4bi/PLbL3xkw==BlockBlobHottrueunlockedavailabletrue"
@@ -494,7 +498,7 @@ interactions:
content-type:
- application/xml
date:
- - Wed, 28 Apr 2021 08:46:50 GMT
+ - Thu, 20 May 2021 06:24:36 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
@@ -510,9 +514,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:51 GMT
+ - Thu, 20 May 2021 06:24:37 GMT
x-ms-version:
- '2019-02-02'
method: GET
@@ -527,7 +531,7 @@ interactions:
content-type:
- application/xml
date:
- - Wed, 28 Apr 2021 08:46:52 GMT
+ - Thu, 20 May 2021 06:24:37 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
@@ -543,9 +547,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:52 GMT
+ - Thu, 20 May 2021 06:24:38 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -563,11 +567,11 @@ interactions:
content-type:
- application/octet-stream
date:
- - Wed, 28 Apr 2021 08:46:52 GMT
+ - Thu, 20 May 2021 06:24:38 GMT
etag:
- - '"0x8D90A222715C34F"'
+ - '"0x8D91B57ED1A9054"'
last-modified:
- - Wed, 28 Apr 2021 08:46:45 GMT
+ - Thu, 20 May 2021 06:24:30 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-access-tier:
@@ -577,7 +581,7 @@ interactions:
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- - Wed, 28 Apr 2021 08:46:45 GMT
+ - Thu, 20 May 2021 06:24:30 GMT
x-ms-lease-state:
- available
x-ms-lease-status:
@@ -595,9 +599,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:53 GMT
+ - Thu, 20 May 2021 06:24:39 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -607,7 +611,7 @@ interactions:
string: ''
headers:
date:
- - Wed, 28 Apr 2021 08:46:53 GMT
+ - Thu, 20 May 2021 06:24:39 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
@@ -627,11 +631,11 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:54 GMT
+ - Thu, 20 May 2021 06:24:40 GMT
x-ms-rename-source:
- - /cont4oivmqb5nxugfeletfpl/dir1/blobwa4fgaqx374bnddsjxfw
+ - /conticd2oqeulfvoudkwkibh/dir1/blobh6acri32tk2mpjjauriu
x-ms-version:
- '2019-02-02'
method: PUT
@@ -643,7 +647,7 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:54 GMT
+ - Thu, 20 May 2021 06:24:40 GMT
server:
- Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
x-ms-version:
@@ -657,9 +661,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:55 GMT
+ - Thu, 20 May 2021 06:24:41 GMT
x-ms-version:
- '2019-02-02'
method: GET
@@ -668,9 +672,9 @@ interactions:
body:
string: "\uFEFFdir1/5000dir1/blob000005Wed,
- 28 Apr 2021 08:46:45 GMTWed, 28 Apr 2021 08:46:45
- GMT0x8D90A222715C34F131072application/octet-streamdir1/5000dir1/blob000005Thu,
+ 20 May 2021 06:24:30 GMTThu, 20 May 2021 06:24:30
+ GMT0x8D91B57ED1A9054131072application/octet-streamDfvoqkwgtS4bi/PLbL3xkw==BlockBlobHottrueunlockedavailabletrue"
@@ -678,7 +682,7 @@ interactions:
content-type:
- application/xml
date:
- - Wed, 28 Apr 2021 08:46:55 GMT
+ - Thu, 20 May 2021 06:24:41 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
@@ -694,9 +698,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.1.0-2.1.0 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:56 GMT
+ - Thu, 20 May 2021 06:24:43 GMT
x-ms-version:
- '2019-02-02'
method: HEAD
@@ -712,11 +716,11 @@ interactions:
content-type:
- application/octet-stream
date:
- - Wed, 28 Apr 2021 08:46:57 GMT
+ - Thu, 20 May 2021 06:24:43 GMT
etag:
- - '"0x8D90A22287CCE04"'
+ - '"0x8D91B57EE9225ED"'
last-modified:
- - Wed, 28 Apr 2021 08:46:47 GMT
+ - Thu, 20 May 2021 06:24:32 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
x-ms-access-tier:
@@ -726,7 +730,7 @@ interactions:
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- - Wed, 28 Apr 2021 08:46:47 GMT
+ - Thu, 20 May 2021 06:24:32 GMT
x-ms-lease-state:
- available
x-ms-lease-status:
diff --git a/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_file_upload_small_file_v2.yaml b/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_file_upload_small_file_v2.yaml
index 38b11f0b890..7cf1e866266 100644
--- a/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_file_upload_small_file_v2.yaml
+++ b/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_file_upload_small_file_v2.yaml
@@ -15,12 +15,12 @@ interactions:
ParameterSetName:
- -n -g --query -o
User-Agent:
- - AZURECLI/2.22.1 azsdk-python-azure-mgmt-storage/17.1.0 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - AZURECLI/2.23.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
method: POST
- uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/clitest000002/listKeys?api-version=2021-02-01&$expand=kerb
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/clitest000002/listKeys?api-version=2021-04-01&$expand=kerb
response:
body:
- string: '{"keys":[{"creationTime":"2021-04-28T08:46:14.1285706Z","keyName":"key1","value":"veryFakedStorageAccountKey==","permissions":"FULL"},{"creationTime":"2021-04-28T08:46:14.1285706Z","keyName":"key2","value":"veryFakedStorageAccountKey==","permissions":"FULL"}]}'
+ string: '{"keys":[{"creationTime":"2021-05-20T06:25:43.5298863Z","keyName":"key1","value":"veryFakedStorageAccountKey==","permissions":"FULL"},{"creationTime":"2021-05-20T06:25:43.5298863Z","keyName":"key2","value":"veryFakedStorageAccountKey==","permissions":"FULL"}]}'
headers:
cache-control:
- no-cache
@@ -29,7 +29,7 @@ interactions:
content-type:
- application/json
date:
- - Wed, 28 Apr 2021 08:46:34 GMT
+ - Thu, 20 May 2021 06:26:03 GMT
expires:
- '-1'
pragma:
@@ -45,7 +45,7 @@ interactions:
x-content-type-options:
- nosniff
x-ms-ratelimit-remaining-subscription-resource-requests:
- - '11998'
+ - '11999'
status:
code: 200
message: OK
@@ -57,9 +57,9 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:35 GMT
+ - Thu, 20 May 2021 06:26:05 GMT
x-ms-version:
- '2018-11-09'
method: PUT
@@ -71,11 +71,11 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:35 GMT
+ - Thu, 20 May 2021 06:26:06 GMT
etag:
- - '"0x8D90A2221EB8EB6"'
+ - '"0x8D91B582641DF9B"'
last-modified:
- - Wed, 28 Apr 2021 08:46:36 GMT
+ - Thu, 20 May 2021 06:26:06 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-version:
@@ -95,7 +95,7 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - azsdk-python-storage-file-share/12.3.0b1 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-storage-file-share/12.3.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
x-ms-cache-control:
- no-cache
x-ms-content-disposition:
@@ -109,7 +109,7 @@ interactions:
x-ms-content-type:
- multipart/form-data;
x-ms-date:
- - Wed, 28 Apr 2021 08:46:37 GMT
+ - Thu, 20 May 2021 06:26:07 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
@@ -135,27 +135,27 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:37 GMT
+ - Thu, 20 May 2021 06:26:07 GMT
etag:
- - '"0x8D90A22230ADBC2"'
+ - '"0x8D91B58277AC466"'
last-modified:
- - Wed, 28 Apr 2021 08:46:38 GMT
+ - Thu, 20 May 2021 06:26:08 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Archive
x-ms-file-change-time:
- - '2021-04-28T08:46:38.2715842Z'
+ - '2021-05-20T06:26:08.3064934Z'
x-ms-file-creation-time:
- - '2021-04-28T08:46:38.2715842Z'
+ - '2021-05-20T06:26:08.3064934Z'
x-ms-file-id:
- '13835128424026341376'
x-ms-file-last-write-time:
- - '2021-04-28T08:46:38.2715842Z'
+ - '2021-05-20T06:26:08.3064934Z'
x-ms-file-parent-id:
- '0'
x-ms-file-permission-key:
- - 3551120514346288423*11090383507847605673
+ - 4979777615065758298*17130374783272374996
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
@@ -178,9 +178,9 @@ interactions:
Content-Type:
- application/octet-stream
User-Agent:
- - azsdk-python-storage-file-share/12.3.0b1 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-storage-file-share/12.3.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
x-ms-date:
- - Wed, 28 Apr 2021 08:46:38 GMT
+ - Thu, 20 May 2021 06:26:08 GMT
x-ms-range:
- bytes=0-86
x-ms-version:
@@ -198,11 +198,11 @@ interactions:
content-md5:
- sYGKK8OX+WLKH+2bAe4tSQ==
date:
- - Wed, 28 Apr 2021 08:46:38 GMT
+ - Thu, 20 May 2021 06:26:07 GMT
etag:
- - '"0x8D90A2223312F0E"'
+ - '"0x8D91B5827A67022"'
last-modified:
- - Wed, 28 Apr 2021 08:46:38 GMT
+ - Thu, 20 May 2021 06:26:08 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-request-server-encrypted:
@@ -218,9 +218,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:38 GMT
+ - Thu, 20 May 2021 06:26:08 GMT
x-ms-version:
- '2018-11-09'
method: HEAD
@@ -242,11 +242,11 @@ interactions:
content-type:
- multipart/form-data;
date:
- - Wed, 28 Apr 2021 08:46:39 GMT
+ - Thu, 20 May 2021 06:26:09 GMT
etag:
- - '"0x8D90A2223312F0E"'
+ - '"0x8D91B5827A67022"'
last-modified:
- - Wed, 28 Apr 2021 08:46:38 GMT
+ - Thu, 20 May 2021 06:26:08 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-meta-key:
@@ -272,11 +272,11 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - azsdk-python-storage-file-share/12.3.0b1 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-storage-file-share/12.3.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
x-ms-content-length:
- '87'
x-ms-date:
- - Wed, 28 Apr 2021 08:46:40 GMT
+ - Thu, 20 May 2021 06:26:10 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
@@ -294,14 +294,14 @@ interactions:
response:
body:
string: "\uFEFFParentNotFoundThe
- specified parent path does not exist.\nRequestId:07d312d1-c01a-0050-100b-3c0738000000\nTime:2021-04-28T08:46:40.9747475Z"
+ specified parent path does not exist.\nRequestId:1210ced7-a01a-012e-6b41-4df7f1000000\nTime:2021-05-20T06:26:11.4650449Z"
headers:
content-length:
- '224'
content-type:
- application/xml
date:
- - Wed, 28 Apr 2021 08:46:40 GMT
+ - Thu, 20 May 2021 06:26:10 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-error-code:
@@ -319,9 +319,9 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:41 GMT
+ - Thu, 20 May 2021 06:26:11 GMT
x-ms-version:
- '2018-11-09'
method: PUT
@@ -333,11 +333,11 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:41 GMT
+ - Thu, 20 May 2021 06:26:12 GMT
etag:
- - '"0x8D90A22254C7CB5"'
+ - '"0x8D91B582A171CBD"'
last-modified:
- - Wed, 28 Apr 2021 08:46:42 GMT
+ - Thu, 20 May 2021 06:26:12 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-request-server-encrypted:
@@ -359,11 +359,11 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - azsdk-python-storage-file-share/12.3.0b1 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-storage-file-share/12.3.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
x-ms-content-length:
- '87'
x-ms-date:
- - Wed, 28 Apr 2021 08:46:42 GMT
+ - Thu, 20 May 2021 06:26:12 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
@@ -385,27 +385,27 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:43 GMT
+ - Thu, 20 May 2021 06:26:13 GMT
etag:
- - '"0x8D90A2225FADC23"'
+ - '"0x8D91B582AD33B73"'
last-modified:
- - Wed, 28 Apr 2021 08:46:43 GMT
+ - Thu, 20 May 2021 06:26:13 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Archive
x-ms-file-change-time:
- - '2021-04-28T08:46:43.1999011Z'
+ - '2021-05-20T06:26:13.9194227Z'
x-ms-file-creation-time:
- - '2021-04-28T08:46:43.1999011Z'
+ - '2021-05-20T06:26:13.9194227Z'
x-ms-file-id:
- '13835075647468208128'
x-ms-file-last-write-time:
- - '2021-04-28T08:46:43.1999011Z'
+ - '2021-05-20T06:26:13.9194227Z'
x-ms-file-parent-id:
- '13835163608398430208'
x-ms-file-permission-key:
- - 3551120514346288423*11090383507847605673
+ - 4979777615065758298*17130374783272374996
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
@@ -428,9 +428,9 @@ interactions:
Content-Type:
- application/octet-stream
User-Agent:
- - azsdk-python-storage-file-share/12.3.0b1 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-storage-file-share/12.3.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
x-ms-date:
- - Wed, 28 Apr 2021 08:46:43 GMT
+ - Thu, 20 May 2021 06:26:14 GMT
x-ms-range:
- bytes=0-86
x-ms-version:
@@ -448,11 +448,11 @@ interactions:
content-md5:
- sYGKK8OX+WLKH+2bAe4tSQ==
date:
- - Wed, 28 Apr 2021 08:46:43 GMT
+ - Thu, 20 May 2021 06:26:13 GMT
etag:
- - '"0x8D90A222620E14B"'
+ - '"0x8D91B582AFC75C3"'
last-modified:
- - Wed, 28 Apr 2021 08:46:43 GMT
+ - Thu, 20 May 2021 06:26:14 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-request-server-encrypted:
@@ -468,9 +468,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:43 GMT
+ - Thu, 20 May 2021 06:26:14 GMT
x-ms-version:
- '2018-11-09'
method: HEAD
@@ -484,11 +484,11 @@ interactions:
content-type:
- application/octet-stream
date:
- - Wed, 28 Apr 2021 08:46:44 GMT
+ - Thu, 20 May 2021 06:26:14 GMT
etag:
- - '"0x8D90A222620E14B"'
+ - '"0x8D91B582AFC75C3"'
last-modified:
- - Wed, 28 Apr 2021 08:46:43 GMT
+ - Thu, 20 May 2021 06:26:14 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-server-encrypted:
@@ -512,11 +512,11 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - azsdk-python-storage-file-share/12.3.0b1 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-storage-file-share/12.3.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
x-ms-content-length:
- '87'
x-ms-date:
- - Wed, 28 Apr 2021 08:46:44 GMT
+ - Thu, 20 May 2021 06:26:15 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
@@ -538,27 +538,27 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:45 GMT
+ - Thu, 20 May 2021 06:26:16 GMT
etag:
- - '"0x8D90A222781E650"'
+ - '"0x8D91B582C83D112"'
last-modified:
- - Wed, 28 Apr 2021 08:46:45 GMT
+ - Thu, 20 May 2021 06:26:16 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Archive
x-ms-file-change-time:
- - '2021-04-28T08:46:45.7626192Z'
+ - '2021-05-20T06:26:16.7544082Z'
x-ms-file-creation-time:
- - '2021-04-28T08:46:45.7626192Z'
+ - '2021-05-20T06:26:16.7544082Z'
x-ms-file-id:
- '13835146016212385792'
x-ms-file-last-write-time:
- - '2021-04-28T08:46:45.7626192Z'
+ - '2021-05-20T06:26:16.7544082Z'
x-ms-file-parent-id:
- '0'
x-ms-file-permission-key:
- - 3551120514346288423*11090383507847605673
+ - 4979777615065758298*17130374783272374996
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
@@ -581,9 +581,9 @@ interactions:
Content-Type:
- application/octet-stream
User-Agent:
- - azsdk-python-storage-file-share/12.3.0b1 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-storage-file-share/12.3.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
x-ms-date:
- - Wed, 28 Apr 2021 08:46:45 GMT
+ - Thu, 20 May 2021 06:26:16 GMT
x-ms-range:
- bytes=0-86
x-ms-version:
@@ -601,11 +601,11 @@ interactions:
content-md5:
- sYGKK8OX+WLKH+2bAe4tSQ==
date:
- - Wed, 28 Apr 2021 08:46:45 GMT
+ - Thu, 20 May 2021 06:26:16 GMT
etag:
- - '"0x8D90A2227A860BE"'
+ - '"0x8D91B582CAD0B59"'
last-modified:
- - Wed, 28 Apr 2021 08:46:46 GMT
+ - Thu, 20 May 2021 06:26:17 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-request-server-encrypted:
@@ -621,9 +621,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:46 GMT
+ - Thu, 20 May 2021 06:26:17 GMT
x-ms-version:
- '2018-11-09'
method: HEAD
@@ -637,11 +637,11 @@ interactions:
content-type:
- application/octet-stream
date:
- - Wed, 28 Apr 2021 08:46:46 GMT
+ - Thu, 20 May 2021 06:26:17 GMT
etag:
- - '"0x8D90A2227A860BE"'
+ - '"0x8D91B582CAD0B59"'
last-modified:
- - Wed, 28 Apr 2021 08:46:46 GMT
+ - Thu, 20 May 2021 06:26:17 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-server-encrypted:
@@ -665,11 +665,11 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - azsdk-python-storage-file-share/12.3.0b1 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-storage-file-share/12.3.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
x-ms-content-length:
- '87'
x-ms-date:
- - Wed, 28 Apr 2021 08:46:47 GMT
+ - Thu, 20 May 2021 06:26:18 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
@@ -691,27 +691,27 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:47 GMT
+ - Thu, 20 May 2021 06:26:19 GMT
etag:
- - '"0x8D90A2228FF793D"'
+ - '"0x8D91B582E2A52BA"'
last-modified:
- - Wed, 28 Apr 2021 08:46:48 GMT
+ - Thu, 20 May 2021 06:26:19 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Archive
x-ms-file-change-time:
- - '2021-04-28T08:46:48.2633021Z'
+ - '2021-05-20T06:26:19.5233466Z'
x-ms-file-creation-time:
- - '2021-04-28T08:46:48.2633021Z'
+ - '2021-05-20T06:26:19.5233466Z'
x-ms-file-id:
- '13835110831840296960'
x-ms-file-last-write-time:
- - '2021-04-28T08:46:48.2633021Z'
+ - '2021-05-20T06:26:19.5233466Z'
x-ms-file-parent-id:
- '13835163608398430208'
x-ms-file-permission-key:
- - 3551120514346288423*11090383507847605673
+ - 4979777615065758298*17130374783272374996
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
@@ -734,9 +734,9 @@ interactions:
Content-Type:
- application/octet-stream
User-Agent:
- - azsdk-python-storage-file-share/12.3.0b1 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-storage-file-share/12.3.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
x-ms-date:
- - Wed, 28 Apr 2021 08:46:48 GMT
+ - Thu, 20 May 2021 06:26:19 GMT
x-ms-range:
- bytes=0-86
x-ms-version:
@@ -754,11 +754,11 @@ interactions:
content-md5:
- sYGKK8OX+WLKH+2bAe4tSQ==
date:
- - Wed, 28 Apr 2021 08:46:47 GMT
+ - Thu, 20 May 2021 06:26:19 GMT
etag:
- - '"0x8D90A2229261ABB"'
+ - '"0x8D91B582E56258E"'
last-modified:
- - Wed, 28 Apr 2021 08:46:48 GMT
+ - Thu, 20 May 2021 06:26:19 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-request-server-encrypted:
@@ -774,9 +774,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:48 GMT
+ - Thu, 20 May 2021 06:26:20 GMT
x-ms-version:
- '2018-11-09'
method: HEAD
@@ -790,11 +790,11 @@ interactions:
content-type:
- application/octet-stream
date:
- - Wed, 28 Apr 2021 08:46:49 GMT
+ - Thu, 20 May 2021 06:26:20 GMT
etag:
- - '"0x8D90A2229261ABB"'
+ - '"0x8D91B582E56258E"'
last-modified:
- - Wed, 28 Apr 2021 08:46:48 GMT
+ - Thu, 20 May 2021 06:26:19 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-server-encrypted:
@@ -814,9 +814,9 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:49 GMT
+ - Thu, 20 May 2021 06:26:21 GMT
x-ms-version:
- '2018-11-09'
method: PUT
@@ -828,11 +828,11 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:50 GMT
+ - Thu, 20 May 2021 06:26:22 GMT
etag:
- - '"0x8D90A222A7A2564"'
+ - '"0x8D91B582FCF4D87"'
last-modified:
- - Wed, 28 Apr 2021 08:46:50 GMT
+ - Thu, 20 May 2021 06:26:22 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-request-server-encrypted:
@@ -854,11 +854,11 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - azsdk-python-storage-file-share/12.3.0b1 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-storage-file-share/12.3.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
x-ms-content-length:
- '87'
x-ms-date:
- - Wed, 28 Apr 2021 08:46:50 GMT
+ - Thu, 20 May 2021 06:26:22 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
@@ -880,27 +880,27 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:51 GMT
+ - Thu, 20 May 2021 06:26:22 GMT
etag:
- - '"0x8D90A222B280F66"'
+ - '"0x8D91B583098DC53"'
last-modified:
- - Wed, 28 Apr 2021 08:46:51 GMT
+ - Thu, 20 May 2021 06:26:23 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Archive
x-ms-file-change-time:
- - '2021-04-28T08:46:51.8847334Z'
+ - '2021-05-20T06:26:23.6032083Z'
x-ms-file-creation-time:
- - '2021-04-28T08:46:51.8847334Z'
+ - '2021-05-20T06:26:23.6032083Z'
x-ms-file-id:
- '13835066851375185920'
x-ms-file-last-write-time:
- - '2021-04-28T08:46:51.8847334Z'
+ - '2021-05-20T06:26:23.6032083Z'
x-ms-file-parent-id:
- '13835181200584474624'
x-ms-file-permission-key:
- - 3551120514346288423*11090383507847605673
+ - 4979777615065758298*17130374783272374996
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
@@ -923,9 +923,9 @@ interactions:
Content-Type:
- application/octet-stream
User-Agent:
- - azsdk-python-storage-file-share/12.3.0b1 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-storage-file-share/12.3.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
x-ms-date:
- - Wed, 28 Apr 2021 08:46:51 GMT
+ - Thu, 20 May 2021 06:26:23 GMT
x-ms-range:
- bytes=0-86
x-ms-version:
@@ -943,11 +943,11 @@ interactions:
content-md5:
- sYGKK8OX+WLKH+2bAe4tSQ==
date:
- - Wed, 28 Apr 2021 08:46:52 GMT
+ - Thu, 20 May 2021 06:26:22 GMT
etag:
- - '"0x8D90A222B4F9B68"'
+ - '"0x8D91B5830C2DA05"'
last-modified:
- - Wed, 28 Apr 2021 08:46:52 GMT
+ - Thu, 20 May 2021 06:26:23 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-request-server-encrypted:
@@ -963,9 +963,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:52 GMT
+ - Thu, 20 May 2021 06:26:24 GMT
x-ms-version:
- '2018-11-09'
method: HEAD
@@ -979,11 +979,11 @@ interactions:
content-type:
- application/octet-stream
date:
- - Wed, 28 Apr 2021 08:46:52 GMT
+ - Thu, 20 May 2021 06:26:24 GMT
etag:
- - '"0x8D90A222B4F9B68"'
+ - '"0x8D91B5830C2DA05"'
last-modified:
- - Wed, 28 Apr 2021 08:46:52 GMT
+ - Thu, 20 May 2021 06:26:23 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-server-encrypted:
@@ -1007,11 +1007,11 @@ interactions:
Content-Length:
- '0'
User-Agent:
- - azsdk-python-storage-file-share/12.3.0b1 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-storage-file-share/12.3.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
x-ms-content-length:
- '87'
x-ms-date:
- - Wed, 28 Apr 2021 08:46:53 GMT
+ - Thu, 20 May 2021 06:26:25 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
@@ -1033,27 +1033,27 @@ interactions:
content-length:
- '0'
date:
- - Wed, 28 Apr 2021 08:46:53 GMT
+ - Thu, 20 May 2021 06:26:25 GMT
etag:
- - '"0x8D90A222CBDE96C"'
+ - '"0x8D91B58325DE7FC"'
last-modified:
- - Wed, 28 Apr 2021 08:46:54 GMT
+ - Thu, 20 May 2021 06:26:26 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Archive
x-ms-file-change-time:
- - '2021-04-28T08:46:54.5445228Z'
+ - '2021-05-20T06:26:26.5722876Z'
x-ms-file-creation-time:
- - '2021-04-28T08:46:54.5445228Z'
+ - '2021-05-20T06:26:26.5722876Z'
x-ms-file-id:
- '13835137220119363584'
x-ms-file-last-write-time:
- - '2021-04-28T08:46:54.5445228Z'
+ - '2021-05-20T06:26:26.5722876Z'
x-ms-file-parent-id:
- '13835181200584474624'
x-ms-file-permission-key:
- - 3551120514346288423*11090383507847605673
+ - 4979777615065758298*17130374783272374996
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
@@ -1076,9 +1076,9 @@ interactions:
Content-Type:
- application/octet-stream
User-Agent:
- - azsdk-python-storage-file-share/12.3.0b1 Python/3.7.7 (Windows-10-10.0.19041-SP0)
+ - azsdk-python-storage-file-share/12.3.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
x-ms-date:
- - Wed, 28 Apr 2021 08:46:54 GMT
+ - Thu, 20 May 2021 06:26:26 GMT
x-ms-range:
- bytes=0-86
x-ms-version:
@@ -1096,11 +1096,11 @@ interactions:
content-md5:
- sYGKK8OX+WLKH+2bAe4tSQ==
date:
- - Wed, 28 Apr 2021 08:46:53 GMT
+ - Thu, 20 May 2021 06:26:26 GMT
etag:
- - '"0x8D90A222CE5756A"'
+ - '"0x8D91B58328C7A64"'
last-modified:
- - Wed, 28 Apr 2021 08:46:54 GMT
+ - Thu, 20 May 2021 06:26:26 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-request-server-encrypted:
@@ -1116,9 +1116,9 @@ interactions:
Connection:
- keep-alive
User-Agent:
- - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.7.7; Windows 10) AZURECLI/2.22.1
+ - Azure-Storage/2.0.0-2.0.1 (Python CPython 3.8.3; Windows 10) AZURECLI/2.23.0
x-ms-date:
- - Wed, 28 Apr 2021 08:46:54 GMT
+ - Thu, 20 May 2021 06:26:27 GMT
x-ms-version:
- '2018-11-09'
method: HEAD
@@ -1132,11 +1132,11 @@ interactions:
content-type:
- application/octet-stream
date:
- - Wed, 28 Apr 2021 08:46:55 GMT
+ - Thu, 20 May 2021 06:26:27 GMT
etag:
- - '"0x8D90A222CE5756A"'
+ - '"0x8D91B58328C7A64"'
last-modified:
- - Wed, 28 Apr 2021 08:46:54 GMT
+ - Thu, 20 May 2021 06:26:26 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-server-encrypted:
diff --git a/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_fs_soft_delete.yaml b/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_fs_soft_delete.yaml
new file mode 100644
index 00000000000..2407ecabb11
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/tests/latest/recordings/test_storage_fs_soft_delete.yaml
@@ -0,0 +1,905 @@
+interactions:
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ CommandName:
+ - storage account keys list
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '0'
+ ParameterSetName:
+ - -n -g --query -o
+ User-Agent:
+ - AZURECLI/2.24.0 azsdk-python-azure-mgmt-storage/18.0.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ method: POST
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001/providers/Microsoft.Storage/storageAccounts/clitest000002/listKeys?api-version=2021-04-01&$expand=kerb
+ response:
+ body:
+ string: '{"keys":[{"creationTime":"2021-05-28T07:15:56.0507406Z","keyName":"key1","value":"veryFakedStorageAccountKey==","permissions":"FULL"},{"creationTime":"2021-05-28T07:15:56.0507406Z","keyName":"key2","value":"veryFakedStorageAccountKey==","permissions":"FULL"}]}'
+ headers:
+ cache-control:
+ - no-cache
+ content-length:
+ - '380'
+ content-type:
+ - application/json
+ date:
+ - Fri, 28 May 2021 07:16:18 GMT
+ expires:
+ - '-1'
+ pragma:
+ - no-cache
+ server:
+ - Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains
+ transfer-encoding:
+ - chunked
+ vary:
+ - Accept-Encoding
+ x-content-type-options:
+ - nosniff
+ x-ms-ratelimit-remaining-subscription-resource-requests:
+ - '11998'
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - '*/*'
+ Accept-Encoding:
+ - gzip, deflate
+ CommandName:
+ - storage fs create
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '0'
+ ParameterSetName:
+ - -n --account-name --account-key
+ User-Agent:
+ - AZURECLI/2.24.0 azsdk-python-storage-dfs/12.3.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:16:18 GMT
+ x-ms-version:
+ - '2020-02-10'
+ method: PUT
+ uri: https://clitest000002.blob.core.windows.net/filesystem000003?restype=container
+ response:
+ body:
+ string: ''
+ headers:
+ content-length:
+ - '0'
+ date:
+ - Fri, 28 May 2021 07:16:21 GMT
+ etag:
+ - '"0x8D921A87EEC99EB"'
+ last-modified:
+ - Fri, 28 May 2021 07:16:21 GMT
+ server:
+ - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
+ x-ms-version:
+ - '2020-02-10'
+ status:
+ code: 201
+ message: Created
+- request:
+ body: null
+ headers:
+ Accept:
+ - '*/*'
+ Accept-Encoding:
+ - gzip, deflate
+ CommandName:
+ - storage fs file upload
+ Connection:
+ - keep-alive
+ ParameterSetName:
+ - -f -s -p --account-name --account-key
+ User-Agent:
+ - AZURECLI/2.24.0 azsdk-python-storage-dfs/12.3.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:16:21 GMT
+ x-ms-version:
+ - '2020-02-10'
+ method: HEAD
+ uri: https://clitest000002.blob.core.windows.net/filesystem000003/file000004
+ response:
+ body:
+ string: ''
+ headers:
+ date:
+ - Fri, 28 May 2021 07:16:23 GMT
+ server:
+ - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
+ transfer-encoding:
+ - chunked
+ x-ms-error-code:
+ - BlobNotFound
+ x-ms-version:
+ - '2020-02-10'
+ status:
+ code: 404
+ message: The specified blob does not exist.
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ CommandName:
+ - storage fs file upload
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '0'
+ ParameterSetName:
+ - -f -s -p --account-name --account-key
+ User-Agent:
+ - AZURECLI/2.24.0 azsdk-python-storage-dfs/12.3.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:16:23 GMT
+ x-ms-properties:
+ - ''
+ x-ms-version:
+ - '2020-02-10'
+ method: PUT
+ uri: https://clitest000002.dfs.core.windows.net/filesystem000003/file000004?resource=file
+ response:
+ body:
+ string: ''
+ headers:
+ content-length:
+ - '0'
+ date:
+ - Fri, 28 May 2021 07:16:24 GMT
+ etag:
+ - '"0x8D921A880AD942F"'
+ last-modified:
+ - Fri, 28 May 2021 07:16:24 GMT
+ server:
+ - Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
+ x-ms-request-server-encrypted:
+ - 'true'
+ x-ms-version:
+ - '2020-02-10'
+ status:
+ code: 201
+ message: Created
+- request:
+ body: "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ CommandName:
+ - storage fs file upload
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '1024'
+ Content-Type:
+ - application/json
+ ParameterSetName:
+ - -f -s -p --account-name --account-key
+ User-Agent:
+ - AZURECLI/2.24.0 azsdk-python-storage-dfs/12.3.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:16:24 GMT
+ x-ms-version:
+ - '2020-02-10'
+ method: PATCH
+ uri: https://clitest000002.dfs.core.windows.net/filesystem000003/file000004?action=append&position=0
+ response:
+ body:
+ string: ''
+ headers:
+ content-length:
+ - '0'
+ date:
+ - Fri, 28 May 2021 07:16:24 GMT
+ server:
+ - Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
+ x-ms-request-server-encrypted:
+ - 'true'
+ x-ms-version:
+ - '2020-02-10'
+ status:
+ code: 202
+ message: Accepted
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ CommandName:
+ - storage fs file upload
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '0'
+ If-Match:
+ - '*'
+ ParameterSetName:
+ - -f -s -p --account-name --account-key
+ User-Agent:
+ - AZURECLI/2.24.0 azsdk-python-storage-dfs/12.3.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:16:25 GMT
+ x-ms-version:
+ - '2020-02-10'
+ method: PATCH
+ uri: https://clitest000002.dfs.core.windows.net/filesystem000003/file000004?action=flush&position=1024&close=true
+ response:
+ body:
+ string: ''
+ headers:
+ content-length:
+ - '0'
+ date:
+ - Fri, 28 May 2021 07:16:24 GMT
+ etag:
+ - '"0x8D921A8810EBDD7"'
+ last-modified:
+ - Fri, 28 May 2021 07:16:25 GMT
+ server:
+ - Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
+ x-ms-request-server-encrypted:
+ - 'false'
+ x-ms-version:
+ - '2020-02-10'
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ CommandName:
+ - storage fs file list
+ Connection:
+ - keep-alive
+ ParameterSetName:
+ - -f --account-name --account-key
+ User-Agent:
+ - AZURECLI/2.24.0 azsdk-python-storage-dfs/12.3.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:16:25 GMT
+ x-ms-version:
+ - '2020-02-10'
+ method: GET
+ uri: https://clitest000002.dfs.core.windows.net/filesystem000003?resource=filesystem&recursive=true&maxResults=5000
+ response:
+ body:
+ string: '{"paths":[{"contentLength":"1024","creationTime":"132666597846717487","etag":"0x8D921A8810EBDD7","group":"$superuser","lastModified":"Fri,
+ 28 May 2021 07:16:25 GMT","name":"file000004","owner":"$superuser","permissions":"rw-r-----"}]}
+
+ '
+ headers:
+ content-type:
+ - application/json;charset=utf-8
+ date:
+ - Fri, 28 May 2021 07:16:26 GMT
+ server:
+ - Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
+ transfer-encoding:
+ - chunked
+ x-ms-version:
+ - '2020-02-10'
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ CommandName:
+ - storage fs directory create
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '0'
+ ParameterSetName:
+ - -f -n --account-name --account-key
+ User-Agent:
+ - AZURECLI/2.24.0 azsdk-python-storage-dfs/12.3.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:16:26 GMT
+ x-ms-properties:
+ - ''
+ x-ms-version:
+ - '2020-02-10'
+ method: PUT
+ uri: https://clitest000002.dfs.core.windows.net/filesystem000003/dir?resource=directory
+ response:
+ body:
+ string: ''
+ headers:
+ content-length:
+ - '0'
+ date:
+ - Fri, 28 May 2021 07:16:27 GMT
+ etag:
+ - '"0x8D921A882B9A3B3"'
+ last-modified:
+ - Fri, 28 May 2021 07:16:28 GMT
+ server:
+ - Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
+ x-ms-request-server-encrypted:
+ - 'true'
+ x-ms-version:
+ - '2020-02-10'
+ status:
+ code: 201
+ message: Created
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ CommandName:
+ - storage fs file list
+ Connection:
+ - keep-alive
+ ParameterSetName:
+ - -f --account-name --account-key
+ User-Agent:
+ - AZURECLI/2.24.0 azsdk-python-storage-dfs/12.3.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:16:28 GMT
+ x-ms-version:
+ - '2020-02-10'
+ method: GET
+ uri: https://clitest000002.dfs.core.windows.net/filesystem000003?resource=filesystem&recursive=true&maxResults=5000
+ response:
+ body:
+ string: '{"paths":[{"contentLength":"0","creationTime":"132666597881062323","etag":"0x8D921A882B9A3B3","group":"$superuser","isDirectory":"true","lastModified":"Fri,
+ 28 May 2021 07:16:28 GMT","name":"dir","owner":"$superuser","permissions":"rwxr-x---"},{"contentLength":"1024","creationTime":"132666597846717487","etag":"0x8D921A8810EBDD7","group":"$superuser","lastModified":"Fri,
+ 28 May 2021 07:16:25 GMT","name":"file000004","owner":"$superuser","permissions":"rw-r-----"}]}
+
+ '
+ headers:
+ content-type:
+ - application/json;charset=utf-8
+ date:
+ - Fri, 28 May 2021 07:16:28 GMT
+ server:
+ - Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
+ transfer-encoding:
+ - chunked
+ x-ms-version:
+ - '2020-02-10'
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/xml
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ User-Agent:
+ - azsdk-python-storage-blob/12.6.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:16:30 GMT
+ x-ms-version:
+ - '2020-02-10'
+ method: GET
+ uri: https://clitest000002.blob.core.windows.net/?restype=service&comp=properties
+ response:
+ body:
+ string: "\uFEFF1.0falsefalsefalsefalse1.0truetruetrue71.0falsefalsefalsefalsefalse"
+ headers:
+ content-type:
+ - application/xml
+ date:
+ - Fri, 28 May 2021 07:16:30 GMT
+ server:
+ - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
+ transfer-encoding:
+ - chunked
+ x-ms-version:
+ - '2020-02-10'
+ status:
+ code: 200
+ message: OK
+- request:
+ body: '
+
+ 1.0truetruetrue71.0falsefalsetrue2false'
+ headers:
+ Accept:
+ - '*/*'
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '542'
+ Content-Type:
+ - application/xml; charset=utf-8
+ User-Agent:
+ - azsdk-python-storage-blob/12.6.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:16:31 GMT
+ x-ms-version:
+ - '2020-02-10'
+ method: PUT
+ uri: https://clitest000002.blob.core.windows.net/?restype=service&comp=properties
+ response:
+ body:
+ string: ''
+ headers:
+ content-length:
+ - '0'
+ date:
+ - Fri, 28 May 2021 07:16:31 GMT
+ server:
+ - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
+ x-ms-version:
+ - '2020-02-10'
+ status:
+ code: 202
+ message: Accepted
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/xml
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ User-Agent:
+ - azsdk-python-storage-blob/12.6.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:16:31 GMT
+ x-ms-version:
+ - '2020-02-10'
+ method: GET
+ uri: https://clitest000002.blob.core.windows.net/?restype=service&comp=properties
+ response:
+ body:
+ string: "\uFEFF1.0falsefalsefalsefalse1.0truetruetrue71.0falsefalsetrue2falsefalse"
+ headers:
+ content-type:
+ - application/xml
+ date:
+ - Fri, 28 May 2021 07:16:31 GMT
+ server:
+ - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
+ transfer-encoding:
+ - chunked
+ x-ms-version:
+ - '2020-02-10'
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/xml
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ User-Agent:
+ - azsdk-python-storage-blob/12.6.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:16:32 GMT
+ x-ms-version:
+ - '2020-02-10'
+ method: GET
+ uri: https://clitest000002.blob.core.windows.net/?restype=service&comp=properties
+ response:
+ body:
+ string: "\uFEFF1.0falsefalsefalsefalse1.0truetruetrue71.0falsefalsetrue2falsefalse"
+ headers:
+ content-type:
+ - application/xml
+ date:
+ - Fri, 28 May 2021 07:16:33 GMT
+ server:
+ - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
+ transfer-encoding:
+ - chunked
+ x-ms-version:
+ - '2020-02-10'
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ CommandName:
+ - storage fs file delete
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '0'
+ ParameterSetName:
+ - -f -p -y --account-name --account-key
+ User-Agent:
+ - AZURECLI/2.24.0 azsdk-python-storage-dfs/12.3.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:16:43 GMT
+ x-ms-version:
+ - '2020-02-10'
+ method: DELETE
+ uri: https://clitest000002.dfs.core.windows.net/filesystem000003/file000004?recursive=true
+ response:
+ body:
+ string: ''
+ headers:
+ content-length:
+ - '0'
+ date:
+ - Fri, 28 May 2021 07:16:44 GMT
+ server:
+ - Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
+ x-ms-version:
+ - '2020-02-10'
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ CommandName:
+ - storage fs directory delete
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '0'
+ ParameterSetName:
+ - -f -n -y --account-name --account-key
+ User-Agent:
+ - AZURECLI/2.24.0 azsdk-python-storage-dfs/12.3.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:16:45 GMT
+ x-ms-version:
+ - '2020-02-10'
+ method: DELETE
+ uri: https://clitest000002.dfs.core.windows.net/filesystem000003/dir?recursive=true
+ response:
+ body:
+ string: ''
+ headers:
+ content-length:
+ - '0'
+ date:
+ - Fri, 28 May 2021 07:16:46 GMT
+ server:
+ - Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
+ x-ms-version:
+ - '2020-02-10'
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ CommandName:
+ - storage fs file list
+ Connection:
+ - keep-alive
+ ParameterSetName:
+ - -f --account-name --account-key
+ User-Agent:
+ - AZURECLI/2.24.0 azsdk-python-storage-dfs/12.3.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:16:46 GMT
+ x-ms-version:
+ - '2020-02-10'
+ method: GET
+ uri: https://clitest000002.dfs.core.windows.net/filesystem000003?resource=filesystem&recursive=true&maxResults=5000
+ response:
+ body:
+ string: '{"paths":[]}
+
+ '
+ headers:
+ content-type:
+ - application/json;charset=utf-8
+ date:
+ - Fri, 28 May 2021 07:16:47 GMT
+ server:
+ - Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
+ transfer-encoding:
+ - chunked
+ x-ms-version:
+ - '2020-02-10'
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/xml
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ User-Agent:
+ - azsdk-python-storage-dfs/12.4.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:17:48 GMT
+ x-ms-version:
+ - '2020-06-12'
+ method: GET
+ uri: https://clitest000002.blob.core.windows.net/filesystem000003?restype=container&comp=list&prefix=dir&showonly=deleted
+ response:
+ body:
+ string: "\uFEFFdirdir132666598065709145trueFri,
+ 28 May 2021 07:16:28 GMTFri, 28 May 2021 07:16:28
+ GMTSun, 30 May 2021 07:16:46 GMT0x8D921A882B9A3B30application/octet-streamAAAAAAAAAAA=BlockBlobHottruetrueFri,
+ 28 May 2021 07:16:46 GMT1"
+ headers:
+ content-type:
+ - application/xml
+ date:
+ - Fri, 28 May 2021 07:17:49 GMT
+ server:
+ - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
+ transfer-encoding:
+ - chunked
+ x-ms-version:
+ - '2020-06-12'
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/xml
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ User-Agent:
+ - azsdk-python-storage-dfs/12.4.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:17:50 GMT
+ x-ms-version:
+ - '2020-06-12'
+ method: GET
+ uri: https://clitest000002.blob.core.windows.net/filesystem000003?restype=container&comp=list&showonly=deleted
+ response:
+ body:
+ string: "\uFEFFdir132666598065709145trueFri,
+ 28 May 2021 07:16:28 GMTFri, 28 May 2021 07:16:28
+ GMTSun, 30 May 2021 07:16:46 GMT0x8D921A882B9A3B30application/octet-streamAAAAAAAAAAA=BlockBlobHottruetrueFri,
+ 28 May 2021 07:16:46 GMT1file000004132666598051069022trueFri,
+ 28 May 2021 07:16:24 GMTFri, 28 May 2021 07:16:25
+ GMTSun, 30 May 2021 07:16:44 GMT0x8D921A8810EBDD71024application/octet-streamAAAAAAAAAAA=BlockBlobHottruetrueFri,
+ 28 May 2021 07:16:45 GMT1"
+ headers:
+ content-type:
+ - application/xml
+ date:
+ - Fri, 28 May 2021 07:17:50 GMT
+ server:
+ - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
+ transfer-encoding:
+ - chunked
+ x-ms-version:
+ - '2020-06-12'
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/xml
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ User-Agent:
+ - azsdk-python-storage-dfs/12.4.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:17:51 GMT
+ x-ms-version:
+ - '2020-06-12'
+ method: GET
+ uri: https://clitest000002.blob.core.windows.net/filesystem000003?restype=container&comp=list&maxResults=1&showonly=deleted
+ response:
+ body:
+ string: "\uFEFF1dir132666598065709145trueFri,
+ 28 May 2021 07:16:28 GMTFri, 28 May 2021 07:16:28
+ GMTSun, 30 May 2021 07:16:46 GMT0x8D921A882B9A3B30application/octet-streamAAAAAAAAAAA=BlockBlobHottruetrueFri,
+ 28 May 2021 07:16:46 GMT12!364!MDAwMjI4IU1USXhNVGM0T1RReE16Z3hOakU0TVRZeE1qY2dNQ0F2WTJ4cGRHVnpkR1ZtTW5Kc00zRTBNelp4WkRScFpYSXlBVEF4UkRjMU16a3hORVF5TWpBelJEa3ZKSFJ5WVhOb0wyWnBiR1Z6ZVhOMFpXMW9jSEIxYVhCdFpqUnZObTAxZHdFd01VUTNOVE01TVRWRE56VkRSREZHQWk5bWFXeGxkSFptZVRNeWJYRnFiRzFzYkc1a2FYVm1hSElCTWpBeU1TMHdOUzB5T0ZRd056b3hOam8wTlM0eE1EWTVNREl5V2c9PSEwMDAwMjghMjAyMS0wNS0yOFQwNzoxNzo1My42ODM3MDc3WiE-"
+ headers:
+ content-type:
+ - application/xml
+ date:
+ - Fri, 28 May 2021 07:17:53 GMT
+ server:
+ - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
+ transfer-encoding:
+ - chunked
+ x-ms-version:
+ - '2020-06-12'
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/xml
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ User-Agent:
+ - azsdk-python-storage-dfs/12.4.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:17:53 GMT
+ x-ms-version:
+ - '2020-06-12'
+ method: GET
+ uri: https://clitest000002.blob.core.windows.net/filesystem000003?restype=container&comp=list&marker=2%21364%21MDAwMjI4IU1USXhNVGM0T1RReE16Z3hOakU0TVRZeE1qY2dNQ0F2WTJ4cGRHVnpkR1ZtTW5Kc00zRTBNelp4WkRScFpYSXlBVEF4UkRjMU16a3hORVF5TWpBelJEa3ZKSFJ5WVhOb0wyWnBiR1Z6ZVhOMFpXMW9jSEIxYVhCdFpqUnZObTAxZHdFd01VUTNOVE01TVRWRE56VkRSREZHQWk5bWFXeGxkSFptZVRNeWJYRnFiRzFzYkc1a2FYVm1hSElCTWpBeU1TMHdOUzB5T0ZRd056b3hOam8wTlM0eE1EWTVNREl5V2c9PSEwMDAwMjghMjAyMS0wNS0yOFQwNzoxNzo1My42ODM3MDc3WiE-&showonly=deleted
+ response:
+ body:
+ string: "\uFEFF2!364!MDAwMjI4IU1USXhNVGM0T1RReE16Z3hOakU0TVRZeE1qY2dNQ0F2WTJ4cGRHVnpkR1ZtTW5Kc00zRTBNelp4WkRScFpYSXlBVEF4UkRjMU16a3hORVF5TWpBelJEa3ZKSFJ5WVhOb0wyWnBiR1Z6ZVhOMFpXMW9jSEIxYVhCdFpqUnZObTAxZHdFd01VUTNOVE01TVRWRE56VkRSREZHQWk5bWFXeGxkSFptZVRNeWJYRnFiRzFzYkc1a2FYVm1hSElCTWpBeU1TMHdOUzB5T0ZRd056b3hOam8wTlM0eE1EWTVNREl5V2c9PSEwMDAwMjghMjAyMS0wNS0yOFQwNzoxNzo1My42ODM3MDc3WiE-file000004132666598051069022trueFri,
+ 28 May 2021 07:16:24 GMTFri, 28 May 2021 07:16:25
+ GMTSun, 30 May 2021 07:16:44 GMT0x8D921A8810EBDD71024application/octet-streamAAAAAAAAAAA=BlockBlobHottruetrueFri,
+ 28 May 2021 07:16:45 GMT1"
+ headers:
+ content-type:
+ - application/xml
+ date:
+ - Fri, 28 May 2021 07:17:55 GMT
+ server:
+ - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
+ transfer-encoding:
+ - chunked
+ x-ms-version:
+ - '2020-06-12'
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '0'
+ User-Agent:
+ - azsdk-python-storage-dfs/12.4.0b1 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:17:55 GMT
+ x-ms-undelete-source:
+ - filetvfy32mqjlmllndiufhr?deletionid=132666598051069022
+ x-ms-version:
+ - '2020-06-12'
+ method: PUT
+ uri: https://clitest000002.blob.core.windows.net/filesystem000003/file000004?comp=undelete
+ response:
+ body:
+ string: ''
+ headers:
+ content-length:
+ - '0'
+ date:
+ - Fri, 28 May 2021 07:17:56 GMT
+ server:
+ - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
+ x-ms-creation-time:
+ - Fri, 28 May 2021 07:16:24 GMT
+ x-ms-resource-type:
+ - file
+ x-ms-version:
+ - '2020-06-12'
+ status:
+ code: 200
+ message: OK
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ CommandName:
+ - storage fs file list
+ Connection:
+ - keep-alive
+ ParameterSetName:
+ - -f --account-name --account-key
+ User-Agent:
+ - AZURECLI/2.24.0 azsdk-python-storage-dfs/12.3.0 Python/3.8.3 (Windows-10-10.0.19041-SP0)
+ x-ms-date:
+ - Fri, 28 May 2021 07:17:57 GMT
+ x-ms-version:
+ - '2020-02-10'
+ method: GET
+ uri: https://clitest000002.dfs.core.windows.net/filesystem000003?resource=filesystem&recursive=true&maxResults=5000
+ response:
+ body:
+ string: '{"paths":[{"contentLength":"1024","creationTime":"132666597846717487","etag":"0x8D921A8810EBDD7","group":"$superuser","lastModified":"Fri,
+ 28 May 2021 07:16:25 GMT","name":"file000004","owner":"$superuser","permissions":"rw-r-----"}]}
+
+ '
+ headers:
+ content-type:
+ - application/json;charset=utf-8
+ date:
+ - Fri, 28 May 2021 07:17:58 GMT
+ server:
+ - Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0
+ transfer-encoding:
+ - chunked
+ x-ms-version:
+ - '2020-02-10'
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/src/storage-preview/azext_storage_preview/tests/latest/storage_test_util.py b/src/storage-preview/azext_storage_preview/tests/latest/storage_test_util.py
index 5b308cc470c..e2fd3c49061 100644
--- a/src/storage-preview/azext_storage_preview/tests/latest/storage_test_util.py
+++ b/src/storage-preview/azext_storage_preview/tests/latest/storage_test_util.py
@@ -10,7 +10,7 @@
from azure.cli.testsdk.preparers import AbstractPreparer
-class StorageScenarioMixin(object):
+class StorageScenarioMixin:
profile = None
def get_current_profile(self):
@@ -30,6 +30,9 @@ def get_account_info(self, group, name):
"""Returns the storage account name and key in a tuple"""
return name, self.get_account_key(group, name)
+ def oauth_cmd(self, cmd, *args, **kwargs):
+ return self.cmd(cmd + ' --auth-mode login', *args, **kwargs)
+
def storage_cmd(self, cmd, account_info, *args):
cmd = cmd.format(*args)
cmd = '{} --account-name {} --account-key {}'.format(cmd, *account_info)
@@ -50,13 +53,18 @@ def create_share(self, account_info, prefix='share', length=24):
self.storage_cmd('storage share create -n {}', account_info, share_name)
return share_name
+ def create_file_system(self, account_info, prefix='filesystem', length=24):
+ filesystem_name = self.create_random_name(prefix=prefix, length=length)
+ self.storage_cmd('storage fs create -n {}', account_info, filesystem_name)
+ return filesystem_name
+
class StorageTestFilesPreparer(AbstractPreparer):
def __init__(self, parameter_name='test_dir'):
super(StorageTestFilesPreparer, self).__init__(name_prefix='test', name_len=24)
self.parameter_name = parameter_name
- def create_resource(self, name, **_kwargs): # pylint: disable=unused-argument
+ def create_resource(self, name, **kwargs):
temp_dir = os.path.join(tempfile.gettempdir(), self.random_name)
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
@@ -78,7 +86,7 @@ def create_resource(self, name, **_kwargs): # pylint: disable=unused-argument
setattr(self, '_temp_dir', temp_dir)
return {self.parameter_name: temp_dir}
- def remove_resource(self, name, **_kwargs): # pylint: disable=unused-argument
+ def remove_resource(self, name, **kwargs):
temp_dir = self.get_temp_dir()
if temp_dir:
shutil.rmtree(temp_dir, ignore_errors=True)
diff --git a/src/storage-preview/azext_storage_preview/tests/latest/test_storage_account_scenarios.py b/src/storage-preview/azext_storage_preview/tests/latest/test_storage_account_scenarios.py
index b1d78f398eb..fbf28dec969 100644
--- a/src/storage-preview/azext_storage_preview/tests/latest/test_storage_account_scenarios.py
+++ b/src/storage-preview/azext_storage_preview/tests/latest/test_storage_account_scenarios.py
@@ -148,7 +148,7 @@ def test_storage_account_blob_inventory_policy(self, resource_group, storage_acc
JMESPathCheck("policy.rules[0].definition.filters.blobTypes[0]", "blockBlob"),
JMESPathCheck("policy.rules[0].definition.filters.includeBlobVersions", None),
JMESPathCheck("policy.rules[0].definition.filters.includeSnapshots", None),
- JMESPathCheck("policy.rules[0].definition.filters.prefixMatch", None),
+ JMESPathCheck("policy.rules[0].definition.filters.prefixMatch", []),
JMESPathCheck("policy.rules[0].enabled", True),
JMESPathCheck("policy.rules[0].name", "inventoryPolicyRule1"),
JMESPathCheck("policy.type", "Inventory"),
@@ -162,7 +162,7 @@ def test_storage_account_blob_inventory_policy(self, resource_group, storage_acc
JMESPathCheck("policy.rules[0].definition.filters.blobTypes[0]", "blockBlob"),
JMESPathCheck("policy.rules[0].definition.filters.includeBlobVersions", None),
JMESPathCheck("policy.rules[0].definition.filters.includeSnapshots", None),
- JMESPathCheck("policy.rules[0].definition.filters.prefixMatch", None),
+ JMESPathCheck("policy.rules[0].definition.filters.prefixMatch", []),
JMESPathCheck("policy.rules[0].enabled", True),
JMESPathCheck("policy.rules[0].name", "inventoryPolicyRule1"),
JMESPathCheck("policy.type", "Inventory"),
diff --git a/src/storage-preview/azext_storage_preview/tests/latest/test_storage_adls_scenarios.py b/src/storage-preview/azext_storage_preview/tests/latest/test_storage_adls_scenarios.py
index 9a06e0317b6..e448e6c23ea 100644
--- a/src/storage-preview/azext_storage_preview/tests/latest/test_storage_adls_scenarios.py
+++ b/src/storage-preview/azext_storage_preview/tests/latest/test_storage_adls_scenarios.py
@@ -5,14 +5,74 @@
import os
import unittest
+import time
from azure.cli.testsdk import (LiveScenarioTest, ResourceGroupPreparer, ScenarioTest,
- JMESPathCheck, api_version_constraint)
+ JMESPathCheck, api_version_constraint, StorageAccountPreparer)
from .storage_test_util import StorageScenarioMixin, StorageTestFilesPreparer
-from ...profiles import CUSTOM_MGMT_PREVIEW_STORAGE
+from ...profiles import CUSTOM_MGMT_PREVIEW_STORAGE, CUSTOM_DATA_STORAGE_FILEDATALAKE
class StorageADLSTests(StorageScenarioMixin, ScenarioTest):
+ @api_version_constraint(CUSTOM_DATA_STORAGE_FILEDATALAKE, min_api='2020-06-12')
+ @ResourceGroupPreparer()
+ @StorageAccountPreparer(kind="StorageV2", hns=True, location="eastus2euap")
+ def test_storage_fs_soft_delete(self, resource_group, storage_account):
+ account_info = self.get_account_info(resource_group, storage_account)
+ container = self.create_file_system(account_info)
+ # Prepare
+ local_file = self.create_temp_file(1)
+ file_name = self.create_random_name(prefix='file', length=24)
+ dir_name = 'dir'
+
+ self.storage_cmd('storage fs file upload -f {} -s "{}" -p {} ', account_info,
+ container, local_file, file_name)
+ self.assertEqual(len(self.storage_cmd('storage fs file list -f {}',
+ account_info, container).get_output_in_json()), 1)
+ self.storage_cmd('storage fs directory create -f {} -n {} ', account_info,
+ container, dir_name)
+ self.assertEqual(len(self.storage_cmd('storage fs file list -f {}',
+ account_info, container).get_output_in_json()), 2)
+
+ # set delete-policy to enable soft-delete
+ self.storage_cmd('storage fs service-properties update --delete-retention --delete-retention-period 2',
+ account_info)
+ self.storage_cmd('storage fs service-properties show',
+ account_info).assert_with_checks(JMESPathCheck('delete_retention_policy.enabled', True),
+ JMESPathCheck('delete_retention_policy.days', 2))
+ time.sleep(10)
+ # soft-delete and check
+ self.storage_cmd('storage fs file delete -f {} -p {} -y', account_info, container, file_name)
+ self.storage_cmd('storage fs directory delete -f {} -n {} -y', account_info, container, dir_name)
+ self.assertEqual(len(self.storage_cmd('storage fs file list -f {}',
+ account_info, container).get_output_in_json()), 0)
+
+ time.sleep(60)
+ result = self.storage_cmd('storage fs list-deleted-path -f {} --path-prefix {} ',
+ account_info, container, dir_name).get_output_in_json()
+ self.assertEqual(len(result), 1)
+
+ result = self.storage_cmd('storage fs list-deleted-path -f {}', account_info, container)\
+ .get_output_in_json()
+ self.assertEqual(len(result), 2)
+
+ result = self.storage_cmd('storage fs list-deleted-path -f {} --num-results 1', account_info, container)\
+ .get_output_in_json()
+ self.assertEqual(len(result), 2)
+ marker = result[-1]['nextMarker']
+
+ result = self.storage_cmd('storage fs list-deleted-path -f {} --marker {}', account_info, container, marker)\
+ .get_output_in_json()
+ self.assertEqual(len(result), 1)
+
+ deleted_version = result[0]["deletionId"]
+
+ # undelete and check
+ self.storage_cmd('storage fs undelete-path -f {} --deleted-path-name {} --deletion-id {}',
+ account_info, container, file_name, deleted_version)
+ self.assertEqual(len(self.storage_cmd('storage fs file list -f {}',
+ account_info, container).get_output_in_json()), 1)
+
@api_version_constraint(CUSTOM_MGMT_PREVIEW_STORAGE, min_api='2018-02-01')
@ResourceGroupPreparer()
def test_storage_adls_blob(self, resource_group):
diff --git a/src/storage-preview/azext_storage_preview/track2_util.py b/src/storage-preview/azext_storage_preview/track2_util.py
index a2cc329976a..b7e8164f5a4 100644
--- a/src/storage-preview/azext_storage_preview/track2_util.py
+++ b/src/storage-preview/azext_storage_preview/track2_util.py
@@ -21,3 +21,28 @@ def make_file_url(client, directory_name, file_name, sas_token=None):
url += '?' + sas_token
return url
+
+
+def list_generator(pages, num_results):
+ result = []
+
+ # get first page items
+ page = list(next(pages))
+ result += page
+
+ while True:
+ if not pages.continuation_token:
+ break
+
+ # handle num results
+ if num_results is not None:
+ if num_results == len(result):
+ if pages.continuation_token:
+ next_marker = {"nextMarker": pages.continuation_token}
+ result.append(next_marker)
+ break
+
+ page = list(next(pages))
+ result += page
+
+ return result
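A minimal usage sketch (not part of this diff) of how the `list_generator` helper above could be driven: the Azure SDK's `ItemPaged.by_page()` yields a page iterator that exposes `continuation_token`, which is the shape the helper expects. The `get_deleted_paths` call and the module path are assumptions for illustration only.
```python
# Hypothetical sketch, not part of the PR: feeding list_generator with a pager.
# Assumes a FileSystemClient from the vendored filedatalake SDK and that
# get_deleted_paths() returns an azure.core.paging.ItemPaged.
from azext_storage_preview.track2_util import list_generator  # module path assumed


def list_deleted_paths_sketch(file_system_client, marker=None, num_results=None):
    # by_page() returns an iterator of pages and carries continuation_token,
    # which list_generator reads to decide whether to keep paging or to append
    # a {"nextMarker": ...} entry once num_results items have been collected.
    pages = file_system_client.get_deleted_paths().by_page(continuation_token=marker)
    return list_generator(pages=pages, num_results=num_results)
```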
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/__init__.py
new file mode 100644
index 00000000000..a5b81f3bde4
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/__init__.py
@@ -0,0 +1,6 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+__import__('pkg_resources').declare_namespace(__name__)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/__init__.py
new file mode 100644
index 00000000000..459c4c07b44
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/__init__.py
@@ -0,0 +1,103 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from ._download import StorageStreamDownloader
+from ._data_lake_file_client import DataLakeFileClient
+from ._data_lake_directory_client import DataLakeDirectoryClient
+from ._file_system_client import FileSystemClient
+from ._data_lake_service_client import DataLakeServiceClient
+from ._data_lake_lease import DataLakeLeaseClient
+from ._models import (
+ LocationMode,
+ ResourceTypes,
+ FileSystemProperties,
+ FileSystemPropertiesPaged,
+ DirectoryProperties,
+ FileProperties,
+ PathProperties,
+ LeaseProperties,
+ ContentSettings,
+ AccountSasPermissions,
+ FileSystemSasPermissions,
+ DirectorySasPermissions,
+ FileSasPermissions,
+ UserDelegationKey,
+ PublicAccess,
+ AccessPolicy,
+ DelimitedTextDialect,
+ DelimitedJsonDialect,
+ ArrowDialect,
+ ArrowType,
+ DataLakeFileQueryError,
+ AccessControlChangeResult,
+ AccessControlChangeCounters,
+ AccessControlChangeFailure,
+ AccessControlChanges,
+ AnalyticsLogging,
+ Metrics,
+ RetentionPolicy,
+ StaticWebsite,
+ CorsRule,
+ DeletedPathProperties
+)
+
+from ._shared_access_signature import generate_account_sas, generate_file_system_sas, generate_directory_sas, \
+ generate_file_sas
+
+from ._shared.policies import ExponentialRetry, LinearRetry
+from ._shared.models import StorageErrorCode
+from ._version import VERSION
+
+__version__ = VERSION
+
+__all__ = [
+ 'DataLakeServiceClient',
+ 'FileSystemClient',
+ 'DataLakeFileClient',
+ 'DataLakeDirectoryClient',
+ 'DataLakeLeaseClient',
+ 'ExponentialRetry',
+ 'LinearRetry',
+ 'LocationMode',
+ 'PublicAccess',
+ 'AccessPolicy',
+ 'ResourceTypes',
+ 'StorageErrorCode',
+ 'UserDelegationKey',
+ 'FileSystemProperties',
+ 'FileSystemPropertiesPaged',
+ 'DirectoryProperties',
+ 'FileProperties',
+ 'PathProperties',
+ 'LeaseProperties',
+ 'ContentSettings',
+ 'AccessControlChangeResult',
+ 'AccessControlChangeCounters',
+ 'AccessControlChangeFailure',
+ 'AccessControlChanges',
+ 'AccountSasPermissions',
+ 'FileSystemSasPermissions',
+ 'DirectorySasPermissions',
+ 'FileSasPermissions',
+ 'generate_account_sas',
+ 'generate_file_system_sas',
+ 'generate_directory_sas',
+ 'generate_file_sas',
+ 'VERSION',
+ 'StorageStreamDownloader',
+ 'DelimitedTextDialect',
+ 'DelimitedJsonDialect',
+ 'DataLakeFileQueryError',
+ 'ArrowDialect',
+ 'ArrowType',
+ 'DataLakeFileQueryError',
+ 'AnalyticsLogging',
+ 'Metrics',
+ 'RetentionPolicy',
+ 'StaticWebsite',
+ 'CorsRule',
+ 'DeletedPathProperties'
+]
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_data_lake_directory_client.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_data_lake_directory_client.py
new file mode 100644
index 00000000000..c42391e6071
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_data_lake_directory_client.py
@@ -0,0 +1,563 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from typing import Any
+
+try:
+ from urllib.parse import quote, unquote
+except ImportError:
+ from urllib2 import quote, unquote # type: ignore
+from azure.core.pipeline import Pipeline
+from ._deserialize import deserialize_dir_properties
+from ._shared.base_client import TransportWrapper, parse_connection_str
+from ._data_lake_file_client import DataLakeFileClient
+from ._models import DirectoryProperties, FileProperties
+from ._path_client import PathClient
+
+
+class DataLakeDirectoryClient(PathClient):
+ """A client to interact with the DataLake directory, even if the directory may not yet exist.
+
+ For operations relating to a specific subdirectory or file under the directory, a directory client or file client
+ can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions.
+
+ :ivar str url:
+ The full endpoint URL to the file system, including SAS token if used.
+ :ivar str primary_endpoint:
+ The full primary endpoint URL.
+ :ivar str primary_hostname:
+ The hostname of the primary endpoint.
+ :param str account_url:
+ The URI to the storage account.
+ :param file_system_name:
+ The file system for the directory or files.
+ :type file_system_name: str
+ :param directory_name:
+ The whole path of the directory. eg. {directory under file system}/{directory to interact with}
+ :type directory_name: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential from azure.core.credentials, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+ - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_instantiate_client.py
+ :start-after: [START instantiate_directory_client_from_conn_str]
+ :end-before: [END instantiate_directory_client_from_conn_str]
+ :language: python
+ :dedent: 4
+ :caption: Creating the DataLakeServiceClient from connection string.
+ """
+ def __init__(
+ self, account_url, # type: str
+ file_system_name, # type: str
+ directory_name, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, path_name=directory_name,
+ credential=credential, **kwargs)
+
+ @classmethod
+ def from_connection_string(
+ cls, conn_str, # type: str
+ file_system_name, # type: str
+ directory_name, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ): # type: (...) -> DataLakeDirectoryClient
+ """
+ Create DataLakeDirectoryClient from a Connection String.
+
+ :param str conn_str:
+ A connection string to an Azure Storage account.
+ :param file_system_name:
+ The name of file system to interact with.
+ :type file_system_name: str
+ :param directory_name:
+ The name of directory to interact with. The directory is under file system.
+ :type directory_name: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token, or the connection string already has shared
+ access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential from azure.core.credentials, an account shared access
+            key, or an instance of a TokenCredentials class from azure.identity.
+ Credentials provided here will take precedence over those in the connection string.
+        :return: a DataLakeDirectoryClient
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+ """
+ account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+ return cls(
+ account_url, file_system_name=file_system_name, directory_name=directory_name,
+ credential=credential, **kwargs)
+
+ def create_directory(self, metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """
+ Create a new directory.
+
+ :param metadata:
+ Name-value pairs associated with the file as metadata.
+ :type metadata: dict(str, str)
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword lease:
+ Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: response dict (Etag and last modified).
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_directory.py
+ :start-after: [START create_directory]
+ :end-before: [END create_directory]
+ :language: python
+ :dedent: 8
+ :caption: Create directory.
+ """
+ return self._create('directory', metadata=metadata, **kwargs)
+
+ def delete_directory(self, **kwargs):
+ # type: (...) -> None
+ """
+ Marks the specified directory for deletion.
+
+ :keyword lease:
+ Required if the file has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_directory.py
+ :start-after: [START delete_directory]
+ :end-before: [END delete_directory]
+ :language: python
+ :dedent: 4
+ :caption: Delete directory.
+ """
+ return self._delete(recursive=True, **kwargs)
+
+ def get_directory_properties(self, **kwargs):
+ # type: (**Any) -> DirectoryProperties
+ """Returns all user-defined metadata, standard HTTP properties, and
+ system properties for the directory. It does not return the content of the directory.
+
+ :keyword lease:
+ Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: DirectoryProperties
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_directory.py
+ :start-after: [START get_directory_properties]
+ :end-before: [END get_directory_properties]
+ :language: python
+ :dedent: 4
+ :caption: Getting the properties for a file/directory.
+ """
+ return self._get_path_properties(cls=deserialize_dir_properties, **kwargs) # pylint: disable=protected-access
+
+ def exists(self, **kwargs):
+ # type: (**Any) -> bool
+ """
+ Returns True if a directory exists and returns False otherwise.
+
+        :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: boolean
+ """
+ return self._exists(**kwargs)
+
+ def rename_directory(self, new_name, **kwargs):
+ # type: (str, **Any) -> DataLakeDirectoryClient
+ """
+ Rename the source directory.
+
+ :param str new_name:
+            the new directory name the user wants to rename to.
+ The value must have the following format: "{filesystem}/{directory}/{subdirectory}".
+ :keyword source_lease:
+ A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+ match.
+ :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeDirectoryClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_directory.py
+ :start-after: [START rename_directory]
+ :end-before: [END rename_directory]
+ :language: python
+ :dedent: 4
+ :caption: Rename the source directory.
+ """
+ new_name = new_name.strip('/')
+ new_file_system = new_name.split('/')[0]
+ new_path_and_token = new_name[len(new_file_system):].strip('/').split('?')
+ new_path = new_path_and_token[0]
+ try:
+ new_dir_sas = new_path_and_token[1] or self._query_str.strip('?')
+ except IndexError:
+ if not self._raw_credential and new_file_system != self.file_system_name:
+ raise ValueError("please provide the sas token for the new file")
+ if not self._raw_credential and new_file_system == self.file_system_name:
+ new_dir_sas = self._query_str.strip('?')
+
+ new_directory_client = DataLakeDirectoryClient(
+ "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, directory_name=new_path,
+ credential=self._raw_credential or new_dir_sas,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+ require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
+ new_directory_client._rename_path( # pylint: disable=protected-access
+ '/{}/{}{}'.format(quote(unquote(self.file_system_name)),
+ quote(unquote(self.path_name)),
+ self._query_str),
+ **kwargs)
+ return new_directory_client
+
+ def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs):
+ # type: (...) -> DataLakeDirectoryClient
+ """
+ Create a subdirectory and return the subdirectory client to be interacted with.
+
+ :param sub_directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :param metadata:
+ Name-value pairs associated with the file as metadata.
+ :type metadata: dict(str, str)
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword lease:
+ Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeDirectoryClient for the subdirectory.
+ """
+ subdir = self.get_sub_directory_client(sub_directory)
+ subdir.create_directory(metadata=metadata, **kwargs)
+ return subdir
+
+ def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str]
+ **kwargs):
+ # type: (...) -> DataLakeDirectoryClient
+ """
+ Marks the specified subdirectory for deletion.
+
+ :param sub_directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :keyword lease:
+ Required if the file has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeDirectoryClient for the subdirectory
+ """
+ subdir = self.get_sub_directory_client(sub_directory)
+ subdir.delete_directory(**kwargs)
+ return subdir
+
+ def create_file(self, file, # type: Union[FileProperties, str]
+ **kwargs):
+ # type: (...) -> DataLakeFileClient
+ """
+ Create a new file and return the file client to be interacted with.
+
+ :param file:
+ The file with which to interact. This can either be the name of the file,
+ or an instance of FileProperties.
+ :type file: str or ~azure.storage.filedatalake.FileProperties
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword metadata:
+ Name-value pairs associated with the file as metadata.
+ :type metadata: dict(str, str)
+ :keyword lease:
+ Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeFileClient
+ """
+ file_client = self.get_file_client(file)
+ file_client.create_file(**kwargs)
+ return file_client
+
+ def get_file_client(self, file # type: Union[FileProperties, str]
+ ):
+ # type: (...) -> DataLakeFileClient
+ """Get a client to interact with the specified file.
+
+ The file need not already exist.
+
+ :param file:
+ The file with which to interact. This can either be the name of the file,
+ or an instance of FileProperties. eg. directory/subdirectory/file
+ :type file: str or ~azure.storage.filedatalake.FileProperties
+ :returns: A DataLakeFileClient.
+ :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+ """
+ try:
+ file_path = file.get('name')
+ except AttributeError:
+ file_path = self.path_name + '/' + str(file)
+
+ _pipeline = Pipeline(
+ transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return DataLakeFileClient(
+ self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+ require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
+
+ def get_sub_directory_client(self, sub_directory # type: Union[DirectoryProperties, str]
+ ):
+ # type: (...) -> DataLakeDirectoryClient
+ """Get a client to interact with the specified subdirectory of the current directory.
+
+        The subdirectory need not already exist.
+
+ :param sub_directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :returns: A DataLakeDirectoryClient.
+ :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+ """
+ try:
+ subdir_path = sub_directory.get('name')
+ except AttributeError:
+ subdir_path = self.path_name + '/' + str(sub_directory)
+
+ _pipeline = Pipeline(
+ transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return DataLakeDirectoryClient(
+ self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+ require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
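A hedged usage sketch of the `DataLakeDirectoryClient` surface defined in the file above, exercising only methods shown there (`from_connection_string`, `create_directory`, `create_sub_directory`, `create_file`, `exists`, `get_directory_properties`, `rename_directory`, `delete_directory`). The connection string, file system, and path names are placeholders.
```
# Sketch only: placeholder account, file system, and directory names.
from azext_storage_preview.vendored_sdks.azure_storage_filedatalake.v2020_06_12 import (
    DataLakeDirectoryClient,
)

conn_str = "DefaultEndpointsProtocol=https;AccountName=myadls;AccountKey=...;EndpointSuffix=core.windows.net"

dir_client = DataLakeDirectoryClient.from_connection_string(
    conn_str, file_system_name="myfilesystem", directory_name="mydirectory")

dir_client.create_directory()                         # create the directory itself
sub_client = dir_client.create_sub_directory("sub")   # returns a client for the new subdirectory
file_client = dir_client.create_file("data.txt")      # returns a DataLakeFileClient under the directory

print(dir_client.exists())                            # True once the directory exists
props = dir_client.get_directory_properties()         # DirectoryProperties (metadata, HTTP properties)

# rename_directory expects "{filesystem}/{new full path}" and returns a client for the new location.
renamed = dir_client.rename_directory("myfilesystem/renamed-directory")
renamed.delete_directory()                            # marks the renamed directory for deletion
```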
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_data_lake_file_client.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_data_lake_file_client.py
new file mode 100644
index 00000000000..e15842dc3b6
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_data_lake_file_client.py
@@ -0,0 +1,777 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from io import BytesIO
+from typing import Any
+
+try:
+ from urllib.parse import quote, unquote
+except ImportError:
+ from urllib2 import quote, unquote # type: ignore
+
+import six
+
+from azure.core.exceptions import HttpResponseError
+from ._quick_query_helper import DataLakeFileQueryReader
+from ._shared.base_client import parse_connection_str
+from ._shared.request_handlers import get_length, read_length
+from ._shared.response_handlers import return_response_headers
+from ._shared.uploads import IterStreamer
+from ._upload_helper import upload_datalake_file
+from ._download import StorageStreamDownloader
+from ._path_client import PathClient
+from ._serialize import get_mod_conditions, get_path_http_headers, get_access_conditions, add_metadata_headers, \
+ convert_datetime_to_rfc1123
+from ._deserialize import process_storage_error, deserialize_file_properties
+from ._models import FileProperties, DataLakeFileQueryError
+
+
+class DataLakeFileClient(PathClient):
+ """A client to interact with the DataLake file, even if the file may not yet exist.
+
+ :ivar str url:
+ The full endpoint URL to the file system, including SAS token if used.
+ :ivar str primary_endpoint:
+ The full primary endpoint URL.
+ :ivar str primary_hostname:
+ The hostname of the primary endpoint.
+ :param str account_url:
+ The URI to the storage account.
+ :param file_system_name:
+ The file system for the directory or files.
+ :type file_system_name: str
+ :param file_path:
+        The whole file path, used to interact with a specific file.
+ eg. "{directory}/{subdirectory}/{file}"
+ :type file_path: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential from azure.core.credentials, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+ - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_instantiate_client.py
+ :start-after: [START instantiate_file_client_from_conn_str]
+ :end-before: [END instantiate_file_client_from_conn_str]
+ :language: python
+ :dedent: 4
+ :caption: Creating the DataLakeServiceClient from connection string.
+ """
+ def __init__(
+ self, account_url, # type: str
+ file_system_name, # type: str
+ file_path, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path,
+ credential=credential, **kwargs)
+
+ @classmethod
+ def from_connection_string(
+ cls, conn_str, # type: str
+ file_system_name, # type: str
+ file_path, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ): # type: (...) -> DataLakeFileClient
+ """
+ Create DataLakeFileClient from a Connection String.
+
+ :param str conn_str:
+ A connection string to an Azure Storage account.
+ :param file_system_name: The name of file system to interact with.
+ :type file_system_name: str
+        :param file_path:
+            The whole file path to interact with. The file path is under the file system.
+            eg. "{directory}/{subdirectory}/{file}"
+        :type file_path: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token, or the connection string already has shared
+ access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential from azure.core.credentials, an account shared access
+ key, or an instance of a TokenCredentials class from azure.identity.
+ Credentials provided here will take precedence over those in the connection string.
+        :return: a DataLakeFileClient
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+ """
+ account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+ return cls(
+ account_url, file_system_name=file_system_name, file_path=file_path,
+ credential=credential, **kwargs)
+
+ def create_file(self, content_settings=None, # type: Optional[ContentSettings]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """
+ Create a new file.
+
+ :param ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :param metadata:
+ Name-value pairs associated with the file as metadata.
+ :type metadata: dict(str, str)
+ :keyword lease:
+ Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: response dict (Etag and last modified).
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download.py
+ :start-after: [START create_file]
+ :end-before: [END create_file]
+ :language: python
+ :dedent: 4
+ :caption: Create file.
+ """
+ return self._create('file', content_settings=content_settings, metadata=metadata, **kwargs)
+
+ def delete_file(self, **kwargs):
+ # type: (...) -> None
+ """
+ Marks the specified file for deletion.
+
+ :keyword lease:
+ Required if the file has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download.py
+ :start-after: [START delete_file]
+ :end-before: [END delete_file]
+ :language: python
+ :dedent: 4
+ :caption: Delete file.
+ """
+ return self._delete(**kwargs)
+
+ def get_file_properties(self, **kwargs):
+ # type: (**Any) -> FileProperties
+ """Returns all user-defined metadata, standard HTTP properties, and
+ system properties for the file. It does not return the content of the file.
+
+ :keyword lease:
+ Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: FileProperties
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download.py
+ :start-after: [START get_file_properties]
+ :end-before: [END get_file_properties]
+ :language: python
+ :dedent: 4
+ :caption: Getting the properties for a file.
+ """
+ return self._get_path_properties(cls=deserialize_file_properties, **kwargs) # pylint: disable=protected-access
+
+ def set_file_expiry(self, expiry_options, # type: str
+ expires_on=None, # type: Optional[Union[datetime, int]]
+ **kwargs):
+ # type: (str, Optional[Union[datetime, int]], **Any) -> None
+ """Sets the time a file will expire and be deleted.
+
+ :param str expiry_options:
+ Required. Indicates mode of the expiry time.
+ Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute'
+ :param datetime or int expires_on:
+            The time to set the file to expire.
+ When expiry_options is RelativeTo*, expires_on should be an int in milliseconds.
+ If the type of expires_on is datetime, it should be in UTC time.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ try:
+ expires_on = convert_datetime_to_rfc1123(expires_on)
+ except AttributeError:
+ expires_on = str(expires_on)
+ self._datalake_client_for_blob_operation.path \
+ .set_expiry(expiry_options, expires_on=expires_on, **kwargs) # pylint: disable=protected-access
+
+ def _upload_options( # pylint:disable=too-many-statements
+ self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
+ length=None, # type: Optional[int]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+
+ encoding = kwargs.pop('encoding', 'UTF-8')
+ if isinstance(data, six.text_type):
+ data = data.encode(encoding) # type: ignore
+ if length is None:
+ length = get_length(data)
+ if isinstance(data, bytes):
+ data = data[:length]
+
+ if isinstance(data, bytes):
+ stream = BytesIO(data)
+ elif hasattr(data, 'read'):
+ stream = data
+ elif hasattr(data, '__iter__'):
+ stream = IterStreamer(data, encoding=encoding)
+ else:
+ raise TypeError("Unsupported data type: {}".format(type(data)))
+
+ validate_content = kwargs.pop('validate_content', False)
+ content_settings = kwargs.pop('content_settings', None)
+ metadata = kwargs.pop('metadata', None)
+ max_concurrency = kwargs.pop('max_concurrency', 1)
+
+ kwargs['properties'] = add_metadata_headers(metadata)
+ kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None))
+ kwargs['modified_access_conditions'] = get_mod_conditions(kwargs)
+
+ if content_settings:
+ kwargs['path_http_headers'] = get_path_http_headers(content_settings)
+
+ kwargs['stream'] = stream
+ kwargs['length'] = length
+ kwargs['validate_content'] = validate_content
+ kwargs['max_concurrency'] = max_concurrency
+ kwargs['client'] = self._client.path
+ kwargs['file_settings'] = self._config
+
+ return kwargs
+
+ def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
+ length=None, # type: Optional[int]
+ overwrite=False, # type: Optional[bool]
+ **kwargs):
+ # type: (...) -> Dict[str, Any]
+ """
+ Upload data to a file.
+
+ :param data: Content to be uploaded to file
+ :param int length: Size of the data in bytes.
+        :param bool overwrite: Whether to overwrite an existing file.
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword metadata:
+ Name-value pairs associated with the blob as metadata.
+ :paramtype metadata: dict(str, str)
+ :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
+ Required if the blob has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions: Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the file. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https, as https (the default), will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient upload algorithm
+ will not be used because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword int chunk_size:
+ The maximum chunk size for uploading a file in chunks.
+ Defaults to 100*1024*1024, or 100MB.
+ :return: response dict (Etag and last modified).
+ """
+ options = self._upload_options(
+ data,
+ length=length,
+ overwrite=overwrite,
+ **kwargs)
+ return upload_datalake_file(**options)
+
+ @staticmethod
+ def _append_data_options(data, offset, length=None, **kwargs):
+ # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+
+ if isinstance(data, six.text_type):
+ data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore
+ if length is None:
+ length = get_length(data)
+ if length is None:
+ length, data = read_length(data)
+ if isinstance(data, bytes):
+ data = data[:length]
+
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+
+ options = {
+ 'body': data,
+ 'position': offset,
+ 'content_length': length,
+ 'lease_access_conditions': access_conditions,
+ 'validate_content': kwargs.pop('validate_content', False),
+ 'timeout': kwargs.pop('timeout', None),
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
+ offset, # type: int
+ length=None, # type: Optional[int]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime, int]]
+ """Append data to the file.
+
+ :param data: Content to be appended to file
+ :param offset: start position of the data to be appended to.
+ :param length: Size of the data in bytes.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the block content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https as https (the default)
+ will already validate. Note that this MD5 hash is not stored with the
+ file.
+ :keyword lease:
+ Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :return: dict of the response header
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download.py
+ :start-after: [START append_data]
+ :end-before: [END append_data]
+ :language: python
+ :dedent: 4
+ :caption: Append data to the file.
+ """
+ options = self._append_data_options(
+ data,
+ offset,
+ length=length,
+ **kwargs)
+ try:
+ return self._client.path.append_data(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @staticmethod
+ def _flush_data_options(offset, content_settings=None, retain_uncommitted_data=False, **kwargs):
+ # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_mod_conditions(kwargs)
+
+ path_http_headers = None
+ if content_settings:
+ path_http_headers = get_path_http_headers(content_settings)
+
+ options = {
+ 'position': offset,
+ 'content_length': 0,
+ 'path_http_headers': path_http_headers,
+ 'retain_uncommitted_data': retain_uncommitted_data,
+ 'close': kwargs.pop('close', False),
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'timeout': kwargs.pop('timeout', None),
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ def flush_data(self, offset, # type: int
+ retain_uncommitted_data=False, # type: Optional[bool]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """ Commit the previous appended data.
+
+        :param offset: offset is equal to the length of the file after committing the
+            previously appended data.
+ :param bool retain_uncommitted_data: Valid only for flush operations. If
+ "true", uncommitted data is retained after the flush operation
+ completes; otherwise, the uncommitted data is deleted after the flush
+ operation. The default is false. Data at offsets less than the
+ specified position are written to the file when flush succeeds, but
+ this optional parameter allows data after the flush position to be
+ retained for a future flush operation.
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword bool close: Azure Storage Events allow applications to receive
+ notifications when files change. When Azure Storage Events are
+ enabled, a file changed event is raised. This event has a property
+ indicating whether this is the final change to distinguish the
+ difference between an intermediate flush to a file stream and the
+ final close of a file stream. The close query parameter is valid only
+ when the action is "flush" and change notifications are enabled. If
+ the value of close is "true" and the flush operation completes
+ successfully, the service raises a file change notification with a
+ property indicating that this is the final update (the file stream has
+ been closed). If "false" a change notification is raised indicating
+ the file has changed. The default is false. This query parameter is
+ set to true by the Hadoop ABFS driver to indicate that the file stream
+            has been closed.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :return: response header in dict
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START upload_file_to_file_system]
+ :end-before: [END upload_file_to_file_system]
+ :language: python
+ :dedent: 8
+ :caption: Commit the previous appended data.
+ """
+ options = self._flush_data_options(
+ offset,
+ retain_uncommitted_data=retain_uncommitted_data, **kwargs)
+ try:
+ return self._client.path.flush_data(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ def download_file(self, offset=None, length=None, **kwargs):
+ # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
+ """Downloads a file to the StorageStreamDownloader. The readall() method must
+ be used to read all the content, or readinto() must be used to download the file into
+ a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks.
+
+ :param int offset:
+ Start of byte range to use for downloading a section of the file.
+ Must be set if length is provided.
+ :param int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :keyword lease:
+ If specified, download only succeeds if the file's lease is active
+ and matches this ID. Required if the file has an active lease.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :returns: A streaming object (StorageStreamDownloader)
+ :rtype: ~azure.storage.filedatalake.StorageStreamDownloader
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download.py
+ :start-after: [START read_file]
+ :end-before: [END read_file]
+ :language: python
+ :dedent: 4
+ :caption: Return the downloaded data.
+ """
+ downloader = self._blob_client.download_blob(offset=offset, length=length, **kwargs)
+ return StorageStreamDownloader(downloader)
+
+ def exists(self, **kwargs):
+ # type: (**Any) -> bool
+ """
+ Returns True if a file exists and returns False otherwise.
+
+        :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: boolean
+ """
+ return self._exists(**kwargs)
+
+ def rename_file(self, new_name, **kwargs):
+ # type: (str, **Any) -> DataLakeFileClient
+ """
+ Rename the source file.
+
+        :param str new_name: the new file name the user wants to rename to.
+ The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}".
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword source_lease: A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+ match.
+ :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: the renamed file client
+ :rtype: DataLakeFileClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download.py
+ :start-after: [START rename_file]
+ :end-before: [END rename_file]
+ :language: python
+ :dedent: 4
+ :caption: Rename the source file.
+ """
+ new_name = new_name.strip('/')
+ new_file_system = new_name.split('/')[0]
+ new_path_and_token = new_name[len(new_file_system):].strip('/').split('?')
+ new_path = new_path_and_token[0]
+ try:
+ new_file_sas = new_path_and_token[1] or self._query_str.strip('?')
+ except IndexError:
+ if not self._raw_credential and new_file_system != self.file_system_name:
+ raise ValueError("please provide the sas token for the new file")
+ if not self._raw_credential and new_file_system == self.file_system_name:
+ new_file_sas = self._query_str.strip('?')
+
+ new_file_client = DataLakeFileClient(
+ "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, file_path=new_path,
+ credential=self._raw_credential or new_file_sas,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+ _location_mode=self._location_mode, require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function
+ )
+ new_file_client._rename_path( # pylint: disable=protected-access
+ '/{}/{}{}'.format(quote(unquote(self.file_system_name)),
+ quote(unquote(self.path_name)),
+ self._query_str),
+ **kwargs)
+ return new_file_client
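+
+ # Illustrative sketch (not part of the vendored SDK): `new_name` must include the
+ # target file system, per the docstring above. `file_client` is a placeholder
+ # DataLakeFileClient.
+ #
+ #   renamed = file_client.rename_file(file_client.file_system_name + "/newdir/newfile")
+ #   # `renamed` is a new DataLakeFileClient pointing at the renamed path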
+
+ def query_file(self, query_expression, **kwargs):
+ # type: (str, **Any) -> DataLakeFileQueryReader
+ """
+ Enables users to select/project on datalake file data by providing simple query expressions.
+ This operation returns a DataLakeFileQueryReader; use readall() or readinto() to retrieve the query results.
+
+ :param str query_expression:
+ Required. A query statement,
+ e.g. Select * from DataLakeStorage
+ :keyword Callable[~azure.storage.filedatalake.DataLakeFileQueryError] on_error:
+ A function to be called on any processing errors returned by the service.
+ :keyword file_format:
+ Optional. Defines the serialization of the data currently stored in the file. The default is to
+ treat the file data as CSV data formatted in the default dialect. This can be overridden with
+ a custom DelimitedTextDialect, or alternatively a DelimitedJsonDialect.
+ :paramtype file_format:
+ ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect
+ :keyword output_format:
+ Optional. Defines the output serialization for the data stream. By default the data will be returned
+ as it is represented in the file. By providing an output format, the file data will be reformatted
+ according to that profile. This value can be a DelimitedTextDialect or a DelimitedJsonDialect.
+ :paramtype output_format:
+ ~azure.storage.filedatalake.DelimitedTextDialect, ~azure.storage.filedatalake.DelimitedJsonDialect
+ or list[~azure.storage.filedatalake.ArrowDialect]
+ :keyword lease:
+ Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A streaming object (DataLakeFileQueryReader)
+ :rtype: ~azure.storage.filedatalake.DataLakeFileQueryReader
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_query.py
+ :start-after: [START query]
+ :end-before: [END query]
+ :language: python
+ :dedent: 4
+ :caption: select/project on datalake file data by providing simple query expressions.
+ """
+ query_expression = query_expression.replace("from DataLakeStorage", "from BlobStorage")
+ blob_quick_query_reader = self._blob_client.query_blob(query_expression,
+ blob_format=kwargs.pop('file_format', None),
+ error_cls=DataLakeFileQueryError,
+ **kwargs)
+ return DataLakeFileQueryReader(blob_quick_query_reader)
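+
+ # Illustrative sketch (not part of the vendored SDK): querying CSV content in a
+ # file. `file_client` is a placeholder DataLakeFileClient whose data is CSV.
+ #
+ #   reader = file_client.query_file("SELECT * from DataLakeStorage")
+ #   rows = reader.readall()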
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_data_lake_lease.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_data_lake_lease.py
new file mode 100644
index 00000000000..ed85b145b54
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_data_lake_lease.py
@@ -0,0 +1,245 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import uuid
+
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any,
+ TypeVar, TYPE_CHECKING
+)
+from ...blob import BlobLeaseClient
+
+
+if TYPE_CHECKING:
+ from datetime import datetime
+ FileSystemClient = TypeVar("FileSystemClient")
+ DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient")
+ DataLakeFileClient = TypeVar("DataLakeFileClient")
+
+
+class DataLakeLeaseClient(object):
+ """Creates a new DataLakeLeaseClient.
+
+ This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+ :ivar str id:
+ The ID of the lease currently being maintained. This will be `None` if no
+ lease has yet been acquired.
+ :ivar str etag:
+ The ETag of the lease currently being maintained. This will be `None` if no
+ lease has yet been acquired or modified.
+ :ivar ~datetime.datetime last_modified:
+ The last modified timestamp of the lease currently being maintained.
+ This will be `None` if no lease has yet been acquired or modified.
+
+ :param client:
+ The client of the file system, directory, or file to lease.
+ :type client: ~azure.storage.filedatalake.FileSystemClient or
+ ~azure.storage.filedatalake.DataLakeDirectoryClient or ~azure.storage.filedatalake.DataLakeFileClient
+ :param str lease_id:
+ A string representing the lease ID of an existing lease. This value does not
+ need to be specified in order to acquire a new lease, or break one.
+ """
+ def __init__(
+ self, client, lease_id=None
+ ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
+ # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None
+ self.id = lease_id or str(uuid.uuid4())
+ self.last_modified = None
+ self.etag = None
+
+ if hasattr(client, '_blob_client'):
+ _client = client._blob_client # type: ignore # pylint: disable=protected-access
+ elif hasattr(client, '_container_client'):
+ _client = client._container_client # type: ignore # pylint: disable=protected-access
+ else:
+ raise TypeError("Lease must use any of FileSystemClient, DataLakeDirectoryClient, or DataLakeFileClient.")
+
+ self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.release()
+
+ def acquire(self, lease_duration=-1, **kwargs):
+ # type: (int, **Any) -> None
+ """Requests a new lease.
+
+ If the file/file system does not have an active lease, the DataLake service creates a
+ lease on the file/file system and returns a new lease ID.
+
+ :param int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires. A non-infinite lease can be
+ between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change. Default is -1 (infinite lease).
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs)
+ self._update_lease_client_attributes()
+
+ def renew(self, **kwargs):
+ # type: (Any) -> None
+ """Renews the lease.
+
+ The lease can be renewed if the lease ID specified in the
+ lease client matches that associated with the file system or file. Note that
+ the lease may be renewed even if it has expired as long as the file system
+ or file has not been leased again since the expiration of that lease. When you
+ renew a lease, the lease duration clock resets.
+
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ self._blob_lease_client.renew(**kwargs)
+ self._update_lease_client_attributes()
+
+ def release(self, **kwargs):
+ # type: (Any) -> None
+ """Release the lease.
+
+ The lease may be released if the client lease id specified matches
+ that associated with the file system or file. Releasing the lease allows another client
+ to immediately acquire the lease for the file system or file as soon as the release is complete.
+
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ self._blob_lease_client.release(**kwargs)
+ self._update_lease_client_attributes()
+
+ def change(self, proposed_lease_id, **kwargs):
+ # type: (str, Any) -> None
+ """Change the lease ID of an active lease.
+
+ :param str proposed_lease_id:
+ Proposed lease ID, in a GUID string format. The DataLake service returns 400
+ (Invalid request) if the proposed lease ID is not in the correct format.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs)
+ self._update_lease_client_attributes()
+
+ def break_lease(self, lease_break_period=None, **kwargs):
+ # type: (Optional[int], Any) -> int
+ """Break the lease, if the file system or file has an active lease.
+
+ Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
+ the request is not required to specify a matching lease ID. When a lease
+ is broken, the lease break period is allowed to elapse, during which time
+ no lease operation except break and release can be performed on the file system or file.
+ When a lease is successfully broken, the response indicates the interval
+ in seconds until a new lease can be acquired.
+
+ :param int lease_break_period:
+ This is the proposed duration of seconds that the lease
+ should continue before it is broken, between 0 and 60 seconds. This
+ break period is only used if it is shorter than the time remaining
+ on the lease. If longer, the time remaining on the lease is used.
+ A new lease will not be available before the break period has
+ expired, but the lease may be held for longer than the break
+ period. If this header does not appear with a break
+ operation, a fixed-duration lease breaks after the remaining lease
+ period elapses, and an infinite lease breaks immediately.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Approximate time remaining in the lease period, in seconds.
+ :rtype: int
+ """
+ return self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs)
+
+ def _update_lease_client_attributes(self):
+ self.id = self._blob_lease_client.id # type: str
+ self.last_modified = self._blob_lease_client.last_modified # type: datetime
+ self.etag = self._blob_lease_client.etag # type: str
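+
+ # Illustrative sketch (not part of the vendored SDK): a typical lease lifecycle on
+ # a file or file system client. `file_system_client` is a placeholder.
+ #
+ #   lease = DataLakeLeaseClient(file_system_client)
+ #   lease.acquire(lease_duration=15)
+ #   try:
+ #       pass  # operations that pass `lease=lease`
+ #   finally:
+ #       lease.release()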
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_data_lake_service_client.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_data_lake_service_client.py
new file mode 100644
index 00000000000..af66b02a5aa
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_data_lake_service_client.py
@@ -0,0 +1,556 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from typing import Optional, Dict, Any
+
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse # type: ignore
+
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import Pipeline
+
+from ...blob import BlobServiceClient
+from ._shared.base_client import TransportWrapper, StorageAccountHostsMixin, parse_query, parse_connection_str
+from ._deserialize import get_datalake_service_properties
+from ._file_system_client import FileSystemClient
+from ._data_lake_directory_client import DataLakeDirectoryClient
+from ._data_lake_file_client import DataLakeFileClient
+from ._models import UserDelegationKey, FileSystemPropertiesPaged, LocationMode
+from ._serialize import convert_dfs_url_to_blob_url
+
+
+class DataLakeServiceClient(StorageAccountHostsMixin):
+ """A client to interact with the DataLake Service at the account level.
+
+ This client provides operations to retrieve and configure the account properties
+ as well as list, create and delete file systems within the account.
+ For operations relating to a specific file system, directory or file, clients for those entities
+ can also be retrieved using the `get_client` functions.
+
+ :ivar str url:
+ The full endpoint URL to the datalake service endpoint.
+ :ivar str primary_endpoint:
+ The full primary endpoint URL.
+ :ivar str primary_hostname:
+ The hostname of the primary endpoint.
+ :param str account_url:
+ The URL to the DataLake storage account. Any other entities included
+ in the URL path (e.g. file system or file) will be discarded. This URL can be optionally
+ authenticated with a SAS token.
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string,
+ an instance of an AzureSasCredential from azure.core.credentials, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+ - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service.py
+ :start-after: [START create_datalake_service_client]
+ :end-before: [END create_datalake_service_client]
+ :language: python
+ :dedent: 8
+ :caption: Creating the DataLakeServiceClient from connection string.
+
+ .. literalinclude:: ../samples/datalake_samples_service.py
+ :start-after: [START create_datalake_service_client_oauth]
+ :end-before: [END create_datalake_service_client_oauth]
+ :language: python
+ :dedent: 8
+ :caption: Creating the DataLakeServiceClient with Azure Identity credentials.
+ """
+
+ def __init__(
+ self, account_url, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ try:
+ if not account_url.lower().startswith('http'):
+ account_url = "https://" + account_url
+ except AttributeError:
+ raise ValueError("Account URL must be a string.")
+ parsed_url = urlparse(account_url.rstrip('/'))
+ if not parsed_url.netloc:
+ raise ValueError("Invalid URL: {}".format(account_url))
+
+ blob_account_url = convert_dfs_url_to_blob_url(account_url)
+ self._blob_account_url = blob_account_url
+ self._blob_service_client = BlobServiceClient(blob_account_url, credential, **kwargs)
+ self._blob_service_client._hosts[LocationMode.SECONDARY] = ""  # pylint: disable=protected-access
+
+ _, sas_token = parse_query(parsed_url.query)
+ self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
+
+ super(DataLakeServiceClient, self).__init__(parsed_url, service='dfs',
+ credential=self._raw_credential, **kwargs)
+ # ADLS doesn't support secondary endpoint, make sure it's empty
+ self._hosts[LocationMode.SECONDARY] = ""
+
+ def __enter__(self):
+ self._blob_service_client.__enter__()
+ return self
+
+ def __exit__(self, *args):
+ self._blob_service_client.close()
+
+ def close(self):
+ # type: () -> None
+ """ This method is to close the sockets opened by the client.
+ It need not be called when the client is used as a context manager.
+ """
+ self._blob_service_client.close()
+
+ def _format_url(self, hostname):
+ """Format the endpoint URL according to hostname
+ """
+ formatted_url = "{}://{}/{}".format(self.scheme, hostname, self._query_str)
+ return formatted_url
+
+ @classmethod
+ def from_connection_string(
+ cls, conn_str, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ): # type: (...) -> DataLakeServiceClient
+ """
+ Create DataLakeServiceClient from a Connection String.
+
+ :param str conn_str:
+ A connection string to an Azure Storage account.
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token, or the connection string already has shared
+ access key values. The value can be a SAS token string,
+ an instance of an AzureSasCredential from azure.core.credentials, an account shared access
+ key, or an instance of a TokenCredentials class from azure.identity.
+ Credentials provided here will take precedence over those in the connection string.
+ :returns: A DataLakeServiceClient.
+ :rtype: ~azure.storage.filedatalake.DataLakeServiceClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START create_data_lake_service_client_from_conn_str]
+ :end-before: [END create_data_lake_service_client_from_conn_str]
+ :language: python
+ :dedent: 8
+ :caption: Creating the DataLakeServiceClient from a connection string.
+ """
+ account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+ return cls(account_url, credential=credential, **kwargs)
+
+ def get_user_delegation_key(self, key_start_time, # type: datetime
+ key_expiry_time, # type: datetime
+ **kwargs # type: Any
+ ):
+ # type: (...) -> UserDelegationKey
+ """
+ Obtain a user delegation key for the purpose of signing SAS tokens.
+ A token credential must be present on the service object for this request to succeed.
+
+ :param ~datetime.datetime key_start_time:
+ A DateTime value. Indicates when the key becomes valid.
+ :param ~datetime.datetime key_expiry_time:
+ A DateTime value. Indicates when the key stops being valid.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: The user delegation key.
+ :rtype: ~azure.storage.filedatalake.UserDelegationKey
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service.py
+ :start-after: [START get_user_delegation_key]
+ :end-before: [END get_user_delegation_key]
+ :language: python
+ :dedent: 8
+ :caption: Get user delegation key from datalake service client.
+ """
+ delegation_key = self._blob_service_client.get_user_delegation_key(key_start_time=key_start_time,
+ key_expiry_time=key_expiry_time,
+ **kwargs) # pylint: disable=protected-access
+ return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access
+
+ def list_file_systems(self, name_starts_with=None, # type: Optional[str]
+ include_metadata=None, # type: Optional[bool]
+ **kwargs):
+ # type: (...) -> ItemPaged[FileSystemProperties]
+ """Returns a generator to list the file systems under the specified account.
+
+ The generator will lazily follow the continuation tokens returned by
+ the service and stop when all file systems have been returned.
+
+ :param str name_starts_with:
+ Filters the results to return only file systems whose names
+ begin with the specified prefix.
+ :param bool include_metadata:
+ Specifies that file system metadata be returned in the response.
+ The default value is `False`.
+ :keyword int results_per_page:
+ The maximum number of file system names to retrieve per API
+ call. If the request does not specify a value, the server will return up to 5,000 items per page.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword bool include_deleted:
+ Specifies that deleted file systems be returned in the response. This is only for accounts
+ with file system restore enabled. The default value is `False`.
+ .. versionadded:: 12.3.0
+ :returns: An iterable (auto-paging) of FileSystemProperties.
+ :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service.py
+ :start-after: [START list_file_systems]
+ :end-before: [END list_file_systems]
+ :language: python
+ :dedent: 8
+ :caption: Listing the file systems in the datalake service.
+ """
+ item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with,
+ include_metadata=include_metadata,
+ **kwargs) # pylint: disable=protected-access
+ item_paged._page_iterator_class = FileSystemPropertiesPaged # pylint: disable=protected-access
+ return item_paged
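+
+ # Illustrative sketch (not part of the vendored SDK): listing file systems,
+ # including soft-deleted ones. `service_client` is a placeholder
+ # DataLakeServiceClient.
+ #
+ #   for fs in service_client.list_file_systems(include_deleted=True):
+ #       print(fs.name)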
+
+ def create_file_system(self, file_system, # type: Union[FileSystemProperties, str]
+ metadata=None, # type: Optional[Dict[str, str]]
+ public_access=None, # type: Optional[PublicAccess]
+ **kwargs):
+ # type: (...) -> FileSystemClient
+ """Creates a new file system under the specified account.
+
+ If the file system with the same name already exists, a ResourceExistsError will
+ be raised. This method returns a client with which to interact with the newly
+ created file system.
+
+ :param str file_system:
+ The name of the file system to create.
+ :param metadata:
+ A dict with name-value pairs to associate with the
+ file system as metadata. Example: `{'Category':'test'}`
+ :type metadata: dict(str, str)
+ :param public_access:
+ Possible values include: file system, file.
+ :type public_access: ~azure.storage.filedatalake.PublicAccess
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service.py
+ :start-after: [START create_file_system_from_service_client]
+ :end-before: [END create_file_system_from_service_client]
+ :language: python
+ :dedent: 8
+ :caption: Creating a file system in the datalake service.
+ """
+ file_system_client = self.get_file_system_client(file_system)
+ file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs)
+ return file_system_client
+
+ def _rename_file_system(self, name, new_name, **kwargs):
+ # type: (str, str, **Any) -> FileSystemClient
+ """Renames a filesystem.
+
+ Operation is successful only if the source filesystem exists.
+
+ :param str name:
+ The name of the filesystem to rename.
+ :param str new_name:
+ The new filesystem name the user wants to rename to.
+ :keyword lease:
+ Specify this to perform only if the lease ID given
+ matches the active lease ID of the source filesystem.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: ~azure.storage.filedatalake.FileSystemClient
+ """
+ self._blob_service_client._rename_container(name, new_name, **kwargs) # pylint: disable=protected-access
+ renamed_file_system = self.get_file_system_client(new_name)
+ return renamed_file_system
+
+ def undelete_file_system(self, name, deleted_version, **kwargs):
+ # type: (str, str, **Any) -> FileSystemClient
+ """Restores soft-deleted filesystem.
+
+ Operation will only be successful if used within the specified number of days
+ set in the delete retention policy.
+
+ .. versionadded:: 12.3.0
+ This operation was introduced in API version '2019-12-12'.
+
+ :param str name:
+ Specifies the name of the deleted filesystem to restore.
+ :param str deleted_version:
+ Specifies the version of the deleted filesystem to restore.
+ :keyword str new_name:
+ The new name for the deleted filesystem to be restored to.
+ If not specified "name" will be used as the restored filesystem name.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: ~azure.storage.filedatalake.FileSystemClient
+ """
+ new_name = kwargs.pop('new_name', None)
+ file_system = self.get_file_system_client(new_name or name)
+ self._blob_service_client.undelete_container(
+ name, deleted_version, new_name=new_name, **kwargs) # pylint: disable=protected-access
+ return file_system
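+
+ # Illustrative sketch (not part of the vendored SDK): restoring a soft-deleted
+ # file system. The deleted name and version would typically come from
+ # list_file_systems(include_deleted=True); the attribute names used here are assumptions.
+ #
+ #   for fs in service_client.list_file_systems(include_deleted=True):
+ #       if fs.deleted:
+ #           service_client.undelete_file_system(fs.name, fs.version)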
+
+ def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str]
+ **kwargs):
+ # type: (...) -> FileSystemClient
+ """Marks the specified file system for deletion.
+
+ The file system and any files contained within it are later deleted during garbage collection.
+ If the file system is not found, a ResourceNotFoundError will be raised.
+
+ :param file_system:
+ The file system to delete. This can either be the name of the file system,
+ or an instance of FileSystemProperties.
+ :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+ :keyword lease:
+ If specified, delete_file_system only succeeds if the
+ file system's lease is active and matches this ID.
+ Required if the file system has an active lease.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service.py
+ :start-after: [START delete_file_system_from_service_client]
+ :end-before: [END delete_file_system_from_service_client]
+ :language: python
+ :dedent: 8
+ :caption: Deleting a file system in the datalake service.
+ """
+ file_system_client = self.get_file_system_client(file_system)
+ file_system_client.delete_file_system(**kwargs)
+ return file_system_client
+
+ def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str]
+ ):
+ # type: (...) -> FileSystemClient
+ """Get a client to interact with the specified file system.
+
+ The file system need not already exist.
+
+ :param file_system:
+ The file system. This can either be the name of the file system,
+ or an instance of FileSystemProperties.
+ :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+ :returns: A FileSystemClient.
+ :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START create_file_system_client_from_service]
+ :end-before: [END create_file_system_client_from_service]
+ :language: python
+ :dedent: 8
+ :caption: Getting the file system client to interact with a specific file system.
+ """
+ try:
+ file_system_name = file_system.name
+ except AttributeError:
+ file_system_name = file_system
+
+ _pipeline = Pipeline(
+ transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return FileSystemClient(self.url, file_system_name, credential=self._raw_credential,
+ _configuration=self._config,
+ _pipeline=_pipeline, _hosts=self._hosts,
+ require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
+
+ def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str]
+ directory # type: Union[DirectoryProperties, str]
+ ):
+ # type: (...) -> DataLakeDirectoryClient
+ """Get a client to interact with the specified directory.
+
+ The directory need not already exist.
+
+ :param file_system:
+ The file system that the directory is in. This can either be the name of the file system,
+ or an instance of FileSystemProperties.
+ :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+ :param directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :returns: A DataLakeDirectoryClient.
+ :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service.py
+ :start-after: [START get_directory_client_from_service_client]
+ :end-before: [END get_directory_client_from_service_client]
+ :language: python
+ :dedent: 8
+ :caption: Getting the directory client to interact with a specific directory.
+ """
+ try:
+ file_system_name = file_system.name
+ except AttributeError:
+ file_system_name = file_system
+ try:
+ directory_name = directory.name
+ except AttributeError:
+ directory_name = directory
+
+ _pipeline = Pipeline(
+ transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name,
+ credential=self._raw_credential,
+ _configuration=self._config, _pipeline=_pipeline,
+ _hosts=self._hosts,
+ require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function
+ )
+
+ def get_file_client(self, file_system, # type: Union[FileSystemProperties, str]
+ file_path # type: Union[FileProperties, str]
+ ):
+ # type: (...) -> DataLakeFileClient
+ """Get a client to interact with the specified file.
+
+ The file need not already exist.
+
+ :param file_system:
+ The file system that the file is in. This can either be the name of the file system,
+ or an instance of FileSystemProperties.
+ :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+ :param file_path:
+ The file with which to interact. This can either be the full path of the file (from the root directory),
+ or an instance of FileProperties, e.g. directory/subdirectory/file.
+ :type file_path: str or ~azure.storage.filedatalake.FileProperties
+ :returns: A DataLakeFileClient.
+ :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service.py
+ :start-after: [START get_file_client_from_service_client]
+ :end-before: [END get_file_client_from_service_client]
+ :language: python
+ :dedent: 8
+ :caption: Getting the file client to interact with a specific file.
+ """
+ try:
+ file_system_name = file_system.name
+ except AttributeError:
+ file_system_name = file_system
+ try:
+ file_path = file_path.name
+ except AttributeError:
+ pass
+
+ _pipeline = Pipeline(
+ transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return DataLakeFileClient(
+ self.url, file_system_name, file_path=file_path, credential=self._raw_credential,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
+ require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
+
+ def set_service_properties(self, **kwargs):
+ # type: (**Any) -> None
+ """Sets the properties of a storage account's Datalake service, including
+ Azure Storage Analytics.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2020-06-12'.
+
+ If an element (e.g. analytics_logging) is left as None, the
+ existing settings on the service for that functionality are preserved.
+
+ :keyword analytics_logging:
+ Groups the Azure Analytics Logging settings.
+ :type analytics_logging: ~azure.storage.filedatalake.AnalyticsLogging
+ :keyword hour_metrics:
+ The hour metrics settings provide a summary of request
+ statistics grouped by API in hourly aggregates.
+ :type hour_metrics: ~azure.storage.filedatalake.Metrics
+ :keyword minute_metrics:
+ The minute metrics settings provide request statistics
+ for each minute.
+ :type minute_metrics: ~azure.storage.filedatalake.Metrics
+ :keyword cors:
+ You can include up to five CorsRule elements in the
+ list. If an empty list is specified, all CORS rules will be deleted,
+ and CORS will be disabled for the service.
+ :type cors: list[~azure.storage.filedatalake.CorsRule]
+ :keyword str target_version:
+ Indicates the default version to use for requests if an incoming
+ request's version is not specified.
+ :keyword delete_retention_policy:
+ The delete retention policy specifies whether to retain deleted files/directories.
+ It also specifies the number of days and versions of file/directory to keep.
+ :type delete_retention_policy: ~azure.storage.filedatalake.RetentionPolicy
+ :keyword static_website:
+ Specifies whether the static website feature is enabled,
+ and if yes, indicates the index document and 404 error document to use.
+ :type static_website: ~azure.storage.filedatalake.StaticWebsite
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ return self._blob_service_client.set_service_properties(**kwargs) # pylint: disable=protected-access
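+
+ # Illustrative sketch (not part of the vendored SDK): enabling a delete retention
+ # policy, the soft-delete setting surfaced by this extension release. The import
+ # path shown is the public SDK namespace and is an assumption here.
+ #
+ #   from azure.storage.filedatalake import RetentionPolicy
+ #   policy = RetentionPolicy(enabled=True, days=5)
+ #   service_client.set_service_properties(delete_retention_policy=policy)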
+
+ def get_service_properties(self, **kwargs):
+ # type: (**Any) -> Dict[str, Any]
+ """Gets the properties of a storage account's datalake service, including
+ Azure Storage Analytics.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2020-06-12'.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An object containing datalake service properties such as
+ analytics logging, hour/minute metrics, cors rules, etc.
+ :rtype: Dict[str, Any]
+ """
+ props = self._blob_service_client.get_service_properties(**kwargs) # pylint: disable=protected-access
+ return get_datalake_service_properties(props)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_deserialize.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_deserialize.py
new file mode 100644
index 00000000000..18ec80fd44f
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_deserialize.py
@@ -0,0 +1,180 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import logging
+from typing import ( # pylint: disable=unused-import
+ TYPE_CHECKING
+)
+
+from azure.core.pipeline.policies import ContentDecodePolicy
+from azure.core.exceptions import HttpResponseError, DecodeError, ResourceModifiedError, ClientAuthenticationError, \
+ ResourceNotFoundError, ResourceExistsError
+from ._models import FileProperties, DirectoryProperties, LeaseProperties, DeletedPathProperties, StaticWebsite, \
+ RetentionPolicy, Metrics, AnalyticsLogging, PathProperties # pylint: disable=protected-access
+from ._shared.models import StorageErrorCode
+
+if TYPE_CHECKING:
+ pass
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def deserialize_dir_properties(response, obj, headers):
+ metadata = deserialize_metadata(response, obj, headers)
+ dir_properties = DirectoryProperties(
+ metadata=metadata,
+ **headers
+ )
+ return dir_properties
+
+
+def deserialize_file_properties(response, obj, headers):
+ metadata = deserialize_metadata(response, obj, headers)
+ file_properties = FileProperties(
+ metadata=metadata,
+ **headers
+ )
+ if 'Content-Range' in headers:
+ if 'x-ms-blob-content-md5' in headers:
+ file_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5']
+ else:
+ file_properties.content_settings.content_md5 = None
+ return file_properties
+
+
+def deserialize_path_properties(path_list):
+ return [PathProperties._from_generated(path) for path in path_list] # pylint: disable=protected-access
+
+
+def get_deleted_path_properties_from_generated_code(generated):
+ deleted_path = DeletedPathProperties()
+ deleted_path.name = generated.name
+ deleted_path.deleted_time = generated.properties.deleted_time
+ deleted_path.remaining_retention_days = generated.properties.remaining_retention_days
+ deleted_path.deletion_id = generated.deletion_id
+ return deleted_path
+
+
+def is_file_path(_, __, headers):
+ if headers['x-ms-resource-type'] == "file":
+ return True
+ return False
+
+
+def get_datalake_service_properties(datalake_properties):
+ datalake_properties["analytics_logging"] = AnalyticsLogging._from_generated( # pylint: disable=protected-access
+ datalake_properties["analytics_logging"])
+ datalake_properties["hour_metrics"] = Metrics._from_generated(datalake_properties["hour_metrics"]) # pylint: disable=protected-access
+ datalake_properties["minute_metrics"] = Metrics._from_generated( # pylint: disable=protected-access
+ datalake_properties["minute_metrics"])
+ datalake_properties["delete_retention_policy"] = RetentionPolicy._from_generated( # pylint: disable=protected-access
+ datalake_properties["delete_retention_policy"])
+ datalake_properties["static_website"] = StaticWebsite._from_generated( # pylint: disable=protected-access
+ datalake_properties["static_website"])
+ return datalake_properties
+
+
+def from_blob_properties(blob_properties):
+ file_props = FileProperties()
+ file_props.name = blob_properties.name
+ file_props.etag = blob_properties.etag
+ file_props.deleted = blob_properties.deleted
+ file_props.metadata = blob_properties.metadata
+ file_props.lease = blob_properties.lease
+ file_props.lease.__class__ = LeaseProperties
+ file_props.last_modified = blob_properties.last_modified
+ file_props.creation_time = blob_properties.creation_time
+ file_props.size = blob_properties.size
+ file_props.deleted_time = blob_properties.deleted_time
+ file_props.remaining_retention_days = blob_properties.remaining_retention_days
+ file_props.content_settings = blob_properties.content_settings
+ return file_props
+
+
+def normalize_headers(headers):
+ normalized = {}
+ for key, value in headers.items():
+ if key.startswith('x-ms-'):
+ key = key[5:]
+ normalized[key.lower().replace('-', '_')] = value
+ return normalized
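+
+ # Illustrative sketch (not part of the vendored SDK): normalize_headers strips the
+ # 'x-ms-' prefix and snake-cases the remaining keys, e.g.
+ #
+ #   normalize_headers({'x-ms-request-id': '123', 'ETag': '"0x8D"'})
+ #   # -> {'request_id': '123', 'etag': '"0x8D"'}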
+
+
+def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument
+ try:
+ raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")}
+ except AttributeError:
+ raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")}
+ return {k[10:]: v for k, v in raw_metadata.items()}
+
+
+def process_storage_error(storage_error):
+ raise_error = HttpResponseError
+ error_code = storage_error.response.headers.get('x-ms-error-code')
+ error_message = storage_error.message
+ additional_data = {}
+ try:
+ error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
+ if error_body:
+ for info in error_body:
+ if info == 'code':
+ error_code = error_body[info]
+ elif info == 'message':
+ error_message = error_body[info]
+ else:
+ additional_data[info] = error_body[info]
+ except DecodeError:
+ pass
+
+ try:
+ if error_code:
+ error_code = StorageErrorCode(error_code)
+ if error_code in [StorageErrorCode.condition_not_met]:
+ raise_error = ResourceModifiedError
+ if error_code in [StorageErrorCode.invalid_authentication_info,
+ StorageErrorCode.authentication_failed]:
+ raise_error = ClientAuthenticationError
+ if error_code in [StorageErrorCode.resource_not_found,
+ StorageErrorCode.invalid_property_name,
+ StorageErrorCode.invalid_source_uri,
+ StorageErrorCode.source_path_not_found,
+ StorageErrorCode.lease_name_mismatch,
+ StorageErrorCode.file_system_not_found,
+ StorageErrorCode.path_not_found,
+ StorageErrorCode.parent_not_found,
+ StorageErrorCode.invalid_destination_path,
+ StorageErrorCode.invalid_rename_source_path,
+ StorageErrorCode.lease_is_already_broken,
+ StorageErrorCode.invalid_source_or_destination_resource_type,
+ StorageErrorCode.rename_destination_parent_path_not_found]:
+ raise_error = ResourceNotFoundError
+ if error_code in [StorageErrorCode.account_already_exists,
+ StorageErrorCode.account_being_created,
+ StorageErrorCode.resource_already_exists,
+ StorageErrorCode.resource_type_mismatch,
+ StorageErrorCode.source_path_is_being_deleted,
+ StorageErrorCode.path_already_exists,
+ StorageErrorCode.destination_path_is_being_deleted,
+ StorageErrorCode.file_system_already_exists,
+ StorageErrorCode.file_system_being_deleted,
+ StorageErrorCode.path_conflict]:
+ raise_error = ResourceExistsError
+ except ValueError:
+ # Got an unknown error code
+ pass
+
+ try:
+ error_message += "\nErrorCode:{}".format(error_code.value)
+ except AttributeError:
+ error_message += "\nErrorCode:{}".format(error_code)
+ for name, info in additional_data.items():
+ error_message += "\n{}:{}".format(name, info)
+
+ error = raise_error(message=error_message, response=storage_error.response,
+ continuation_token=storage_error.continuation_token)
+ error.error_code = error_code
+ error.additional_info = additional_data
+ raise error
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_download.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_download.py
new file mode 100644
index 00000000000..61716d3cdb5
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_download.py
@@ -0,0 +1,59 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from typing import Iterator
+
+from ._deserialize import from_blob_properties
+
+
+class StorageStreamDownloader(object):
+ """A streaming object to download from Azure Storage.
+
+ :ivar str name:
+ The name of the file being downloaded.
+ :ivar ~azure.storage.filedatalake.FileProperties properties:
+ The properties of the file being downloaded. If only a range of the data is being
+ downloaded, this will be reflected in the properties.
+ :ivar int size:
+ The size of the total data in the stream. This will be the byte range if specified,
+ otherwise the total size of the file.
+ """
+
+ def __init__(self, downloader):
+ self._downloader = downloader
+ self.name = self._downloader.name
+ self.properties = from_blob_properties(self._downloader.properties) # pylint: disable=protected-access
+ self.size = self._downloader.size
+
+ def __len__(self):
+ return self.size
+
+ def chunks(self):
+ # type: () -> Iterator[bytes]
+ """Iterate over chunks in the download stream.
+
+ :rtype: Iterator[bytes]
+ """
+ return self._downloader.chunks()
+
+ def readall(self):
+ """Download the contents of this file.
+
+ This operation is blocking until all data is downloaded.
+
+ :rtype: bytes or str
+ """
+ return self._downloader.readall()
+
+ def readinto(self, stream):
+ """Download the contents of this file to a stream.
+
+ :param stream:
+ The stream to download to. This can be an open file-handle,
+ or any writable stream. The stream must be seekable if the download
+ uses more than one parallel connection.
+ :returns: The number of bytes read.
+ :rtype: int
+ """
+ return self._downloader.readinto(stream)
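+
+ # Illustrative sketch (not part of the vendored SDK): writing the stream to a
+ # local file. `downloader` is a placeholder StorageStreamDownloader returned by a
+ # file client's download call.
+ #
+ #   with open("downloaded.bin", "wb") as local_file:
+ #       bytes_read = downloader.readinto(local_file)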
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_file_system_client.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_file_system_client.py
new file mode 100644
index 00000000000..42887fc0815
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_file_system_client.py
@@ -0,0 +1,917 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import functools
+from typing import Optional, Any, Union
+
+
+try:
+ from urllib.parse import urlparse, quote, unquote
+except ImportError:
+ from urlparse import urlparse # type: ignore
+ from urllib2 import quote, unquote # type: ignore
+import six
+
+from azure.core.pipeline import Pipeline
+from azure.core.exceptions import HttpResponseError
+from azure.core.paging import ItemPaged
+from ...blob import ContainerClient
+from ._shared.base_client import TransportWrapper, StorageAccountHostsMixin, parse_query, parse_connection_str
+from ._serialize import convert_dfs_url_to_blob_url
+from ._list_paths_helper import DeletedPathPropertiesPaged
+from ._models import LocationMode, FileSystemProperties, PublicAccess, DeletedPathProperties, FileProperties, \
+ DirectoryProperties
+from ._data_lake_file_client import DataLakeFileClient
+from ._data_lake_directory_client import DataLakeDirectoryClient
+from ._data_lake_lease import DataLakeLeaseClient
+from ._generated import AzureDataLakeStorageRESTAPI
+from ._generated.models import ListBlobsIncludeItem
+from ._deserialize import deserialize_path_properties, process_storage_error, is_file_path
+
+
+class FileSystemClient(StorageAccountHostsMixin):
+ """A client to interact with a specific file system, even if that file system
+ may not yet exist.
+
+ For operations relating to a specific directory or file within this file system, a directory client or file client
+ can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions.
+
+ :ivar str url:
+ The full endpoint URL to the file system, including SAS token if used.
+ :ivar str primary_endpoint:
+ The full primary endpoint URL.
+ :ivar str primary_hostname:
+ The hostname of the primary endpoint.
+ :param str account_url:
+ The URI to the storage account.
+ :param file_system_name:
+ The file system for the directory or files.
+ :type file_system_name: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string,
+ an instance of an AzureSasCredential from azure.core.credentials, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+ - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START create_file_system_client_from_service]
+ :end-before: [END create_file_system_client_from_service]
+ :language: python
+ :dedent: 8
+ :caption: Get a FileSystemClient from an existing DataLakeServiceClient.
+ """
+ def __init__(
+ self, account_url, # type: str
+ file_system_name, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ try:
+ if not account_url.lower().startswith('http'):
+ account_url = "https://" + account_url
+ except AttributeError:
+ raise ValueError("account URL must be a string.")
+ parsed_url = urlparse(account_url.rstrip('/'))
+ if not file_system_name:
+ raise ValueError("Please specify a file system name.")
+ if not parsed_url.netloc:
+ raise ValueError("Invalid URL: {}".format(account_url))
+
+ blob_account_url = convert_dfs_url_to_blob_url(account_url)
+ # TODO: add self.account_url to base_client and remove _blob_account_url
+ self._blob_account_url = blob_account_url
+
+ datalake_hosts = kwargs.pop('_hosts', None)
+ blob_hosts = None
+ if datalake_hosts:
+ blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY])
+ blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""}
+ self._container_client = ContainerClient(blob_account_url, file_system_name,
+ credential=credential, _hosts=blob_hosts, **kwargs)
+
+ _, sas_token = parse_query(parsed_url.query)
+ self.file_system_name = file_system_name
+ self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
+
+ super(FileSystemClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential,
+ _hosts=datalake_hosts, **kwargs)
+ # ADLS doesn't support secondary endpoint, make sure it's empty
+ self._hosts[LocationMode.SECONDARY] = ""
+ self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, pipeline=self._pipeline)
+ self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._container_client.url,
+ file_system=file_system_name,
+ pipeline=self._pipeline)
+
+ def _format_url(self, hostname):
+ file_system_name = self.file_system_name
+ if isinstance(file_system_name, six.text_type):
+ file_system_name = file_system_name.encode('UTF-8')
+ return "{}://{}/{}{}".format(
+ self.scheme,
+ hostname,
+ quote(file_system_name),
+ self._query_str)
+
+ def __exit__(self, *args):
+ self._container_client.close()
+ super(FileSystemClient, self).__exit__(*args)
+
+ def close(self):
+ # type: () -> None
+ """ This method is to close the sockets opened by the client.
+ It need not be used when using with a context manager.
+ """
+ self._container_client.close()
+ self.__exit__()
+
+ @classmethod
+ def from_connection_string(
+ cls, conn_str, # type: str
+ file_system_name, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ): # type: (...) -> FileSystemClient
+ """
+ Create FileSystemClient from a Connection String.
+
+ :param str conn_str:
+ A connection string to an Azure Storage account.
+ :param file_system_name: The name of file system to interact with.
+ :type file_system_name: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token, or the connection string already has shared
+ access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential from azure.core.credentials, an account shared access
+ key, or an instance of a TokenCredentials class from azure.identity.
+ Credentials provided here will take precedence over those in the connection string.
+        :return: A FileSystemClient.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START create_file_system_client_from_connection_string]
+ :end-before: [END create_file_system_client_from_connection_string]
+ :language: python
+ :dedent: 8
+ :caption: Create FileSystemClient from connection string
+ """
+ account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+ return cls(
+ account_url, file_system_name=file_system_name, credential=credential, **kwargs)
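+
+    # Usage sketch (illustrative only, not part of the vendored SDK): with a
+    # connection string for an HNS-enabled account and a hypothetical file system
+    # name, a client could be built and the file system created roughly like this:
+    #
+    #     fs_client = FileSystemClient.from_connection_string(
+    #         conn_str, file_system_name="myfilesystem")
+    #     fs_client.create_file_system()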
+
+ def acquire_lease(
+ self, lease_duration=-1, # type: int
+ lease_id=None, # type: Optional[str]
+ **kwargs
+ ):
+ # type: (...) -> DataLakeLeaseClient
+ """
+ Requests a new lease. If the file system does not have an active lease,
+ the DataLake service creates a lease on the file system and returns a new
+ lease ID.
+
+ :param int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires. A non-infinite lease can be
+ between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change. Default is -1 (infinite lease).
+ :param str lease_id:
+ Proposed lease ID, in a GUID string format. The DataLake service returns
+ 400 (Invalid request) if the proposed lease ID is not in the correct format.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A DataLakeLeaseClient object, that can be run in a context manager.
+ :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START acquire_lease_on_file_system]
+ :end-before: [END acquire_lease_on_file_system]
+ :language: python
+ :dedent: 8
+ :caption: Acquiring a lease on the file system.
+ """
+ lease = DataLakeLeaseClient(self, lease_id=lease_id)
+ lease.acquire(lease_duration=lease_duration, **kwargs)
+ return lease
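+
+    # Lease usage sketch (illustrative): the returned DataLakeLeaseClient can be
+    # passed back to other operations via the `lease` keyword. The 30-second
+    # duration and the delete call are assumptions for the example.
+    #
+    #     lease = fs_client.acquire_lease(lease_duration=30)
+    #     fs_client.delete_file_system(lease=lease)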
+
+ def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]]
+ public_access=None, # type: Optional[PublicAccess]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Creates a new file system under the specified account.
+
+        If a file system with the same name already exists, a ResourceExistsError will
+        be raised.
+
+ :param metadata:
+ A dict with name-value pairs to associate with the
+ file system as metadata. Example: `{'Category':'test'}`
+ :type metadata: dict(str, str)
+ :param public_access:
+ To specify whether data in the file system may be accessed publicly and the level of access.
+ :type public_access: ~azure.storage.filedatalake.PublicAccess
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+        :returns: A file-system-updated property dict (Etag and last modified).
+        :rtype: dict[str, str or ~datetime.datetime]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START create_file_system]
+ :end-before: [END create_file_system]
+ :language: python
+ :dedent: 12
+ :caption: Creating a file system in the datalake service.
+ """
+ return self._container_client.create_container(metadata=metadata,
+ public_access=public_access,
+ **kwargs)
+
+ def exists(self, **kwargs):
+ # type: (**Any) -> bool
+ """
+ Returns True if a file system exists and returns False otherwise.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: True if the file system exists, False otherwise.
+        :rtype: bool
+ """
+ return self._container_client.exists(**kwargs)
+
+ def _rename_file_system(self, new_name, **kwargs):
+ # type: (str, **Any) -> FileSystemClient
+ """Renames a filesystem.
+
+ Operation is successful only if the source filesystem exists.
+
+ :param str new_name:
+            The new name for the filesystem.
+ :keyword lease:
+ Specify this to perform only if the lease ID given
+ matches the active lease ID of the source filesystem.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: ~azure.storage.filedatalake.FileSystemClient
+ """
+ self._container_client._rename_container(new_name, **kwargs) # pylint: disable=protected-access
+        # TODO: self._raw_credential would not work with SAS tokens
+ renamed_file_system = FileSystemClient(
+ "{}://{}".format(self.scheme, self.primary_hostname), file_system_name=new_name,
+ credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
+ _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+ require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
+ return renamed_file_system
+
+ def delete_file_system(self, **kwargs):
+ # type: (Any) -> None
+ """Marks the specified file system for deletion.
+
+ The file system and any files contained within it are later deleted during garbage collection.
+ If the file system is not found, a ResourceNotFoundError will be raised.
+
+ :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
+ If specified, delete_file_system only succeeds if the
+ file system's lease is active and matches this ID.
+ Required if the file system has an active lease.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START delete_file_system]
+ :end-before: [END delete_file_system]
+ :language: python
+ :dedent: 12
+ :caption: Deleting a file system in the datalake service.
+ """
+ self._container_client.delete_container(**kwargs)
+
+ def get_file_system_properties(self, **kwargs):
+ # type: (Any) -> FileSystemProperties
+ """Returns all user-defined metadata and system properties for the specified
+ file system. The data returned does not include the file system's list of paths.
+
+ :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
+ If specified, get_file_system_properties only succeeds if the
+ file system's lease is active and matches this ID.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Properties for the specified file system within a file system object.
+ :rtype: ~azure.storage.filedatalake.FileSystemProperties
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START get_file_system_properties]
+ :end-before: [END get_file_system_properties]
+ :language: python
+ :dedent: 12
+ :caption: Getting properties on the file system.
+ """
+ container_properties = self._container_client.get_container_properties(**kwargs)
+ return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access
+
+ def set_file_system_metadata( # type: ignore
+ self, metadata, # type: Dict[str, str]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets one or more user-defined name-value pairs for the specified
+ file system. Each call to this operation replaces all existing metadata
+ attached to the file system. To remove all metadata from the file system,
+ call this operation with no metadata dict.
+
+ :param metadata:
+ A dict containing name-value pairs to associate with the file system as
+ metadata. Example: {'category':'test'}
+ :type metadata: dict[str, str]
+ :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease:
+ If specified, set_file_system_metadata only succeeds if the
+ file system's lease is active and matches this ID.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: filesystem-updated property dict (Etag and last modified).
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START set_file_system_metadata]
+ :end-before: [END set_file_system_metadata]
+ :language: python
+ :dedent: 12
+ :caption: Setting metadata on the file system.
+ """
+ return self._container_client.set_container_metadata(metadata=metadata, **kwargs)
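+
+    # Metadata usage sketch (illustrative): each call replaces all existing
+    # metadata; the key/value pair below is a made-up example.
+    #
+    #     fs_client.set_file_system_metadata({'category': 'test'})
+    #     props = fs_client.get_file_system_properties()
+    #     # props.metadata is expected to echo back {'category': 'test'}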
+
+ def set_file_system_access_policy(
+ self, signed_identifiers, # type: Dict[str, AccessPolicy]
+ public_access=None, # type: Optional[Union[str, PublicAccess]]
+ **kwargs
+ ): # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets the permissions for the specified file system or stored access
+ policies that may be used with Shared Access Signatures. The permissions
+ indicate whether files in a file system may be accessed publicly.
+
+ :param signed_identifiers:
+ A dictionary of access policies to associate with the file system. The
+ dictionary may contain up to 5 elements. An empty dictionary
+ will clear the access policies set on the service.
+ :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy]
+ :param ~azure.storage.filedatalake.PublicAccess public_access:
+ To specify whether data in the file system may be accessed publicly and the level of access.
+ :keyword lease:
+ Required if the file system has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A datetime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified date/time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A datetime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: File System-updated property dict (Etag and last modified).
+ :rtype: dict[str, str or ~datetime.datetime]
+ """
+ return self._container_client.set_container_access_policy(signed_identifiers,
+ public_access=public_access, **kwargs)
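+
+    # Access policy sketch (illustrative, assuming the public AccessPolicy and
+    # FileSystemSasPermissions models from azure.storage.filedatalake):
+    #
+    #     from datetime import datetime, timedelta
+    #     from azure.storage.filedatalake import AccessPolicy, FileSystemSasPermissions
+    #     policy = AccessPolicy(permission=FileSystemSasPermissions(read=True),
+    #                           expiry=datetime.utcnow() + timedelta(hours=1))
+    #     fs_client.set_file_system_access_policy({'read-only-policy': policy})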
+
+ def get_file_system_access_policy(self, **kwargs):
+ # type: (Any) -> Dict[str, Any]
+ """Gets the permissions for the specified file system.
+ The permissions indicate whether file system data may be accessed publicly.
+
+ :keyword lease:
+ If specified, the operation only succeeds if the
+ file system's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Access policy information in a dict.
+ :rtype: dict[str, Any]
+ """
+ access_policy = self._container_client.get_container_access_policy(**kwargs)
+ return {
+ 'public_access': PublicAccess._from_generated(access_policy['public_access']), # pylint: disable=protected-access
+ 'signed_identifiers': access_policy['signed_identifiers']
+ }
+
+ def get_paths(self, path=None, # type: Optional[str]
+ recursive=True, # type: Optional[bool]
+ max_results=None, # type: Optional[int]
+ **kwargs):
+ # type: (...) -> ItemPaged[PathProperties]
+ """Returns a generator to list the paths(could be files or directories) under the specified file system.
+ The generator will lazily follow the continuation tokens returned by
+ the service.
+
+        :param str path:
+            Filters the results to return only paths under the specified path.
+        :param bool recursive:
+            Optional. Set True for recursive, False for iterative.
+        :param int max_results: An optional value that specifies the maximum
+            number of items to return per page. If omitted or greater than 5,000, the
+            response will include up to 5,000 items per page.
+ :keyword upn:
+ Optional. Valid only when Hierarchical Namespace is
+ enabled for the account. If "true", the user identity values returned
+ in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+ transformed from Azure Active Directory Object IDs to User Principal
+ Names. If "false", the values will be returned as Azure Active
+ Directory Object IDs. The default value is false. Note that group and
+ application Object IDs are not translated because they do not have
+ unique friendly names.
+        :paramtype upn: bool
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An iterable (auto-paging) response of PathProperties.
+ :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START get_paths_in_file_system]
+ :end-before: [END get_paths_in_file_system]
+ :language: python
+ :dedent: 8
+ :caption: List the paths in the file system.
+ """
+ timeout = kwargs.pop('timeout', None)
+ return self._client.file_system.list_paths(
+ recursive=recursive,
+ max_results=max_results,
+ path=path,
+ timeout=timeout,
+ cls=deserialize_path_properties,
+ **kwargs)
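+
+    # Path listing sketch (illustrative): PathProperties exposes attributes such as
+    # `name` and `is_directory`; the "mydir" prefix is a made-up example.
+    #
+    #     for path in fs_client.get_paths(path="mydir", recursive=True):
+    #         print(path.name, "dir" if path.is_directory else "file")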
+
+ def create_directory(self, directory, # type: Union[DirectoryProperties, str]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs):
+ # type: (...) -> DataLakeDirectoryClient
+ """
+        Create a directory and return a client to interact with it.
+
+ :param directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :param metadata:
+ Name-value pairs associated with the file as metadata.
+ :type metadata: dict(str, str)
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword lease:
+ Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeDirectoryClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START create_directory_from_file_system]
+ :end-before: [END create_directory_from_file_system]
+ :language: python
+ :dedent: 8
+ :caption: Create directory in the file system.
+ """
+ directory_client = self.get_directory_client(directory)
+ directory_client.create_directory(metadata=metadata, **kwargs)
+ return directory_client
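+
+    # Directory creation sketch (illustrative; the names, metadata and POSIX
+    # permissions value are assumptions for the example):
+    #
+    #     dir_client = fs_client.create_directory(
+    #         "mydir", metadata={'owner': 'team-a'}, permissions="rwxr-x---")
+    #     sub_dir_client = fs_client.create_directory("mydir/subdir")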
+
+ def delete_directory(self, directory, # type: Union[DirectoryProperties, str]
+ **kwargs):
+ # type: (...) -> DataLakeDirectoryClient
+ """
+ Marks the specified path for deletion.
+
+ :param directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeDirectoryClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START delete_directory_from_file_system]
+ :end-before: [END delete_directory_from_file_system]
+ :language: python
+ :dedent: 8
+ :caption: Delete directory in the file system.
+ """
+ directory_client = self.get_directory_client(directory)
+ directory_client.delete_directory(**kwargs)
+ return directory_client
+
+ def create_file(self, file, # type: Union[FileProperties, str]
+ **kwargs):
+ # type: (...) -> DataLakeFileClient
+ """
+        Create a file and return a client to interact with it.
+
+ :param file:
+ The file with which to interact. This can either be the name of the file,
+ or an instance of FileProperties.
+ :type file: str or ~azure.storage.filedatalake.FileProperties
+ :param ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :param metadata:
+ Name-value pairs associated with the file as metadata.
+ :type metadata: dict(str, str)
+ :keyword lease:
+ Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeFileClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START create_file_from_file_system]
+ :end-before: [END create_file_from_file_system]
+ :language: python
+ :dedent: 8
+ :caption: Create file in the file system.
+ """
+ file_client = self.get_file_client(file)
+ file_client.create_file(**kwargs)
+ return file_client
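+
+    # File creation sketch (illustrative; assumes the returned DataLakeFileClient
+    # mirrors the public SDK's append_data/flush_data methods):
+    #
+    #     file_client = fs_client.create_file("mydir/data.txt")
+    #     data = b"hello datalake"
+    #     file_client.append_data(data, offset=0, length=len(data))
+    #     file_client.flush_data(len(data))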
+
+ def delete_file(self, file, # type: Union[FileProperties, str]
+ **kwargs):
+ # type: (...) -> DataLakeFileClient
+ """
+ Marks the specified file for deletion.
+
+ :param file:
+ The file with which to interact. This can either be the name of the file,
+ or an instance of FileProperties.
+ :type file: str or ~azure.storage.filedatalake.FileProperties
+ :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeFileClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START delete_file_from_file_system]
+ :end-before: [END delete_file_from_file_system]
+ :language: python
+ :dedent: 8
+ :caption: Delete file in the file system.
+ """
+ file_client = self.get_file_client(file)
+ file_client.delete_file(**kwargs)
+ return file_client
+
+ def _undelete_path_options(self, deleted_path_name, deletion_id):
+ quoted_path = quote(unquote(deleted_path_name.strip('/')))
+
+ url_and_token = self.url.replace('.dfs.', '.blob.').split('?')
+ try:
+            # Re-attach the query string; str.split('?') dropped the '?' separator.
+            url = url_and_token[0] + '/' + quoted_path + '?' + url_and_token[1]
+ except IndexError:
+ url = url_and_token[0] + '/' + quoted_path
+
+ undelete_source = quoted_path + '?deletionid={}'.format(deletion_id) if deletion_id else None
+
+ return quoted_path, url, undelete_source
+
+ def _undelete_path(self, deleted_path_name, deletion_id, **kwargs):
+ # type: (str, str, **Any) -> Union[DataLakeDirectoryClient, DataLakeFileClient]
+ """Restores soft-deleted path.
+
+ Operation will only be successful if used within the specified number of days
+ set in the delete retention policy.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2020-06-12'.
+
+ :param str deleted_path_name:
+ Specifies the path (file or directory) to restore.
+ :param str deletion_id:
+ Specifies the version of the deleted path to restore.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient or ~azure.storage.filedatalake.DataLakeFileClient
+ """
+ _, url, undelete_source = self._undelete_path_options(deleted_path_name, deletion_id)
+
+ pipeline = Pipeline(
+ transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ path_client = AzureDataLakeStorageRESTAPI(
+ url, filesystem=self.file_system_name, path=deleted_path_name, pipeline=pipeline)
+ try:
+ is_file = path_client.path.undelete(undelete_source=undelete_source, cls=is_file_path, **kwargs)
+ if is_file:
+ return self.get_file_client(deleted_path_name)
+ return self.get_directory_client(deleted_path_name)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ def _get_root_directory_client(self):
+ # type: () -> DataLakeDirectoryClient
+ """Get a client to interact with the root directory.
+
+ :returns: A DataLakeDirectoryClient.
+ :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+ """
+ return self.get_directory_client('/')
+
+ def get_directory_client(self, directory # type: Union[DirectoryProperties, str]
+ ):
+ # type: (...) -> DataLakeDirectoryClient
+ """Get a client to interact with the specified directory.
+
+ The directory need not already exist.
+
+ :param directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :returns: A DataLakeDirectoryClient.
+ :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START get_directory_client_from_file_system]
+ :end-before: [END get_directory_client_from_file_system]
+ :language: python
+ :dedent: 8
+ :caption: Getting the directory client to interact with a specific directory.
+ """
+ try:
+ directory_name = directory.get('name')
+ except AttributeError:
+ directory_name = str(directory)
+ _pipeline = Pipeline(
+ transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name,
+ credential=self._raw_credential,
+ _configuration=self._config, _pipeline=_pipeline,
+ _hosts=self._hosts,
+ require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function
+ )
+
+ def get_file_client(self, file_path # type: Union[FileProperties, str]
+ ):
+ # type: (...) -> DataLakeFileClient
+ """Get a client to interact with the specified file.
+
+ The file need not already exist.
+
+ :param file_path:
+            The file with which to interact. This can either be the path of the file (from the root directory),
+            e.g. directory/subdirectory/file, or an instance of FileProperties.
+ :type file_path: str or ~azure.storage.filedatalake.FileProperties
+ :returns: A DataLakeFileClient.
+ :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START get_file_client_from_file_system]
+ :end-before: [END get_file_client_from_file_system]
+ :language: python
+ :dedent: 8
+ :caption: Getting the file client to interact with a specific file.
+ """
+ try:
+ file_path = file_path.get('name')
+ except AttributeError:
+ file_path = str(file_path)
+ _pipeline = Pipeline(
+ transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return DataLakeFileClient(
+ self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
+ require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
+
+ def list_deleted_paths(self, **kwargs):
+ # type: (Any) -> ItemPaged[DeletedPathProperties]
+ """Returns a generator to list the deleted (file or directory) paths under the specified file system.
+ The generator will lazily follow the continuation tokens returned by
+ the service.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2020-06-12'.
+
+ :keyword str path_prefix:
+ Filters the results to return only paths under the specified path.
+ :keyword int max_results:
+ An optional value that specifies the maximum number of items to return per page.
+ If omitted or greater than 5,000, the response will include up to 5,000 items per page.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An iterable (auto-paging) response of DeletedPathProperties.
+ :rtype:
+ ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.DeletedPathProperties]
+ """
+ path_prefix = kwargs.pop('path_prefix', None)
+ results_per_page = kwargs.pop('max_results', None)
+ timeout = kwargs.pop('timeout', None)
+ command = functools.partial(
+ self._datalake_client_for_blob_operation.file_system.list_blob_hierarchy_segment,
+ showonly=ListBlobsIncludeItem.deleted,
+ timeout=timeout,
+ **kwargs)
+ return ItemPaged(
+ command, prefix=path_prefix, page_iterator_class=DeletedPathPropertiesPaged,
+ results_per_page=results_per_page, **kwargs)
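+
+# Soft-delete round-trip sketch (illustrative only; assumes delete retention is
+# enabled on the account). The deletion_id is read from each listed item and
+# passed to the internal _undelete_path helper defined above:
+#
+#     fs_client.delete_file("test")
+#     for deleted in fs_client.list_deleted_paths():
+#         restored = fs_client._undelete_path(deleted.name, deleted.deletion_id)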
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/__init__.py
new file mode 100644
index 00000000000..5cd3ae2368b
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/__init__.py
@@ -0,0 +1,16 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI
+__all__ = ['AzureDataLakeStorageRESTAPI']
+
+try:
+ from ._patch import patch_sdk # type: ignore
+ patch_sdk()
+except ImportError:
+ pass
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/_azure_data_lake_storage_restapi.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/_azure_data_lake_storage_restapi.py
new file mode 100644
index 00000000000..fbd0a7916e6
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/_azure_data_lake_storage_restapi.py
@@ -0,0 +1,91 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import TYPE_CHECKING
+
+from azure.core import PipelineClient
+from msrest import Deserializer, Serializer
+
+if TYPE_CHECKING:
+ # pylint: disable=unused-import,ungrouped-imports
+ from typing import Any
+
+ from azure.core.pipeline.transport import HttpRequest, HttpResponse
+
+from ._configuration import AzureDataLakeStorageRESTAPIConfiguration
+from .operations import ServiceOperations
+from .operations import FileSystemOperations
+from .operations import PathOperations
+from . import models
+
+
+class AzureDataLakeStorageRESTAPI(object):
+ """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
+
+ :ivar service: ServiceOperations operations
+ :vartype service: azure.storage.filedatalake.operations.ServiceOperations
+ :ivar file_system: FileSystemOperations operations
+ :vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations
+ :ivar path: PathOperations operations
+ :vartype path: azure.storage.filedatalake.operations.PathOperations
+    :param url: The URL of the service account, container, or blob that is the target of the desired operation.
+ :type url: str
+ """
+
+ def __init__(
+ self,
+ url, # type: str
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ base_url = '{url}'
+ self._config = AzureDataLakeStorageRESTAPIConfiguration(url, **kwargs)
+ self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+ client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+ self._serialize = Serializer(client_models)
+ self._serialize.client_side_validation = False
+ self._deserialize = Deserializer(client_models)
+
+ self.service = ServiceOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.file_system = FileSystemOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.path = PathOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+
+ def _send_request(self, http_request, **kwargs):
+ # type: (HttpRequest, Any) -> HttpResponse
+ """Runs the network request through the client's chained policies.
+
+ :param http_request: The network request you want to make. Required.
+ :type http_request: ~azure.core.pipeline.transport.HttpRequest
+ :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
+ :return: The response of your network call. Does not do error handling on your response.
+ :rtype: ~azure.core.pipeline.transport.HttpResponse
+ """
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
+ stream = kwargs.pop("stream", True)
+ pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
+ return pipeline_response.http_response
+
+ def close(self):
+ # type: () -> None
+ self._client.close()
+
+ def __enter__(self):
+ # type: () -> AzureDataLakeStorageRESTAPI
+ self._client.__enter__()
+ return self
+
+ def __exit__(self, *exc_details):
+ # type: (Any) -> None
+ self._client.__exit__(*exc_details)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/_configuration.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/_configuration.py
new file mode 100644
index 00000000000..3bfff366da7
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/_configuration.py
@@ -0,0 +1,59 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import TYPE_CHECKING
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+
+if TYPE_CHECKING:
+ # pylint: disable=unused-import,ungrouped-imports
+ from typing import Any
+
+VERSION = "unknown"
+
+class AzureDataLakeStorageRESTAPIConfiguration(Configuration):
+ """Configuration for AzureDataLakeStorageRESTAPI.
+
+ Note that all parameters used to create this instance are saved as instance
+ attributes.
+
+    :param url: The URL of the service account, container, or blob that is the target of the desired operation.
+ :type url: str
+ """
+
+ def __init__(
+ self,
+ url, # type: str
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ if url is None:
+ raise ValueError("Parameter 'url' must not be None.")
+ super(AzureDataLakeStorageRESTAPIConfiguration, self).__init__(**kwargs)
+
+ self.url = url
+ self.resource = "filesystem"
+ self.version = "2020-06-12"
+ kwargs.setdefault('sdk_moniker', 'azuredatalakestoragerestapi/{}'.format(VERSION))
+ self._configure(**kwargs)
+
+ def _configure(
+ self,
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
+ self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
+ self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
+ self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
+ self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
+ self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
+ self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
+ self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
+ self.authentication_policy = kwargs.get('authentication_policy')
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/_data_lake_storage_client.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/_data_lake_storage_client.py
new file mode 100644
index 00000000000..805cafcbad0
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/_data_lake_storage_client.py
@@ -0,0 +1,67 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core import PipelineClient
+from msrest import Serializer, Deserializer
+
+from ._configuration import DataLakeStorageClientConfiguration
+from azure.core.exceptions import map_error
+from .operations import ServiceOperations
+from .operations import FileSystemOperations
+from .operations import PathOperations
+from . import models
+
+
+class DataLakeStorageClient(object):
+ """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
+
+
+ :ivar service: Service operations
+ :vartype service: azure.storage.filedatalake.operations.ServiceOperations
+ :ivar file_system: FileSystem operations
+ :vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations
+ :ivar path: Path operations
+ :vartype path: azure.storage.filedatalake.operations.PathOperations
+
+ :param url: The URL of the service account, container, or blob that is the
+        target of the desired operation.
+ :type url: str
+ :param file_system: The filesystem identifier.
+ :type file_system: str
+ :param path1: The file or directory path.
+ :type path1: str
+ """
+
+ def __init__(self, url, file_system, path1, **kwargs):
+
+ base_url = '{url}'
+ self._config = DataLakeStorageClientConfiguration(url, file_system, path1, **kwargs)
+ self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+ client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+ self.api_version = '2020-06-12'
+ self._serialize = Serializer(client_models)
+ self._deserialize = Deserializer(client_models)
+
+ self.service = ServiceOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.file_system = FileSystemOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.path = PathOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+
+ def close(self):
+ self._client.close()
+ def __enter__(self):
+ self._client.__enter__()
+ return self
+ def __exit__(self, *exc_details):
+ self._client.__exit__(*exc_details)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/__init__.py
new file mode 100644
index 00000000000..24daed3d540
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/__init__.py
@@ -0,0 +1,10 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI
+__all__ = ['AzureDataLakeStorageRESTAPI']
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/_azure_data_lake_storage_restapi.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/_azure_data_lake_storage_restapi.py
new file mode 100644
index 00000000000..efeeeb3b536
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/_azure_data_lake_storage_restapi.py
@@ -0,0 +1,81 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any
+
+from azure.core import AsyncPipelineClient
+from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
+from msrest import Deserializer, Serializer
+
+from ._configuration import AzureDataLakeStorageRESTAPIConfiguration
+from .operations import ServiceOperations
+from .operations import FileSystemOperations
+from .operations import PathOperations
+from .. import models
+
+
+class AzureDataLakeStorageRESTAPI(object):
+ """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
+
+ :ivar service: ServiceOperations operations
+ :vartype service: azure.storage.filedatalake.aio.operations.ServiceOperations
+ :ivar file_system: FileSystemOperations operations
+ :vartype file_system: azure.storage.filedatalake.aio.operations.FileSystemOperations
+ :ivar path: PathOperations operations
+ :vartype path: azure.storage.filedatalake.aio.operations.PathOperations
+    :param url: The URL of the service account, container, or blob that is the target of the desired operation.
+ :type url: str
+ """
+
+ def __init__(
+ self,
+ url: str,
+ **kwargs: Any
+ ) -> None:
+ base_url = '{url}'
+ self._config = AzureDataLakeStorageRESTAPIConfiguration(url, **kwargs)
+ self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+ client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+ self._serialize = Serializer(client_models)
+ self._serialize.client_side_validation = False
+ self._deserialize = Deserializer(client_models)
+
+ self.service = ServiceOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.file_system = FileSystemOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.path = PathOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+
+ async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
+ """Runs the network request through the client's chained policies.
+
+ :param http_request: The network request you want to make. Required.
+ :type http_request: ~azure.core.pipeline.transport.HttpRequest
+ :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
+ :return: The response of your network call. Does not do error handling on your response.
+ :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
+ """
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
+ stream = kwargs.pop("stream", True)
+ pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
+ return pipeline_response.http_response
+
+ async def close(self) -> None:
+ await self._client.close()
+
+ async def __aenter__(self) -> "AzureDataLakeStorageRESTAPI":
+ await self._client.__aenter__()
+ return self
+
+ async def __aexit__(self, *exc_details) -> None:
+ await self._client.__aexit__(*exc_details)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/_configuration.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/_configuration.py
new file mode 100644
index 00000000000..82234727a63
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/_configuration.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+
+VERSION = "unknown"
+
+class AzureDataLakeStorageRESTAPIConfiguration(Configuration):
+ """Configuration for AzureDataLakeStorageRESTAPI.
+
+ Note that all parameters used to create this instance are saved as instance
+ attributes.
+
+    :param url: The URL of the service account, container, or blob that is the target of the desired operation.
+ :type url: str
+ """
+
+ def __init__(
+ self,
+ url: str,
+ **kwargs: Any
+ ) -> None:
+ if url is None:
+ raise ValueError("Parameter 'url' must not be None.")
+ super(AzureDataLakeStorageRESTAPIConfiguration, self).__init__(**kwargs)
+
+ self.url = url
+ self.resource = "filesystem"
+ self.version = "2020-06-12"
+ kwargs.setdefault('sdk_moniker', 'azuredatalakestoragerestapi/{}'.format(VERSION))
+ self._configure(**kwargs)
+
+ def _configure(
+ self,
+ **kwargs: Any
+ ) -> None:
+ self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
+ self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
+ self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
+ self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
+ self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
+ self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
+ self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
+ self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
+ self.authentication_policy = kwargs.get('authentication_policy')
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/_configuration_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/_configuration_async.py
new file mode 100644
index 00000000000..8b4b66bbb15
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/_configuration_async.py
@@ -0,0 +1,63 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+
+from ..version import VERSION
+
+
+class DataLakeStorageClientConfiguration(Configuration):
+ """Configuration for DataLakeStorageClient
+ Note that all parameters used to create this instance are saved as instance
+ attributes.
+
+ :param url: The URL of the service account, container, or blob that is the
+        target of the desired operation.
+ :type url: str
+ :param file_system: The filesystem identifier.
+ :type file_system: str
+ :param path1: The file or directory path.
+ :type path1: str
+    :ivar resource: The value must be "filesystem" for all filesystem
+        operations.
+    :vartype resource: str
+    :ivar version: Specifies the version of the operation to use for this
+        request.
+    :vartype version: str
+ """
+
+ def __init__(self, url, file_system, path1, **kwargs):
+
+ if url is None:
+ raise ValueError("Parameter 'url' must not be None.")
+
+ super(DataLakeStorageClientConfiguration, self).__init__(**kwargs)
+ self._configure(**kwargs)
+
+ self.user_agent_policy.add_user_agent('azsdk-python-datalakestorageclient/{}'.format(VERSION))
+ self.generate_client_request_id = True
+ self.accept_language = None
+
+ self.url = url
+ self.file_system = file_system
+ self.path1 = path1
+ self.resource = "filesystem"
+ self.version = "2020-06-12"
+
+ def _configure(self, **kwargs):
+ self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
+ self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
+ self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
+ self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
+ self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
+ self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
+ self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/_data_lake_storage_client_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/_data_lake_storage_client_async.py
new file mode 100644
index 00000000000..3b5a7a996a0
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/_data_lake_storage_client_async.py
@@ -0,0 +1,68 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core import AsyncPipelineClient
+from msrest import Serializer, Deserializer
+
+from ._configuration_async import DataLakeStorageClientConfiguration
+from azure.core.exceptions import map_error
+from .operations_async import ServiceOperations
+from .operations_async import FileSystemOperations
+from .operations_async import PathOperations
+from .. import models
+
+
+class DataLakeStorageClient(object):
+ """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
+
+
+ :ivar service: Service operations
+ :vartype service: azure.storage.filedatalake.aio.operations_async.ServiceOperations
+ :ivar file_system: FileSystem operations
+ :vartype file_system: azure.storage.filedatalake.aio.operations_async.FileSystemOperations
+ :ivar path: Path operations
+ :vartype path: azure.storage.filedatalake.aio.operations_async.PathOperations
+
+ :param url: The URL of the service account, container, or blob that is the
+    target of the desired operation.
+ :type url: str
+ :param file_system: The filesystem identifier.
+ :type file_system: str
+ :param path1: The file or directory path.
+ :type path1: str
+ """
+
+ def __init__(
+ self, url, file_system, path1, **kwargs):
+
+ base_url = '{url}'
+ self._config = DataLakeStorageClientConfiguration(url, file_system, path1, **kwargs)
+ self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+ client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+ self.api_version = '2020-06-12'
+ self._serialize = Serializer(client_models)
+ self._deserialize = Deserializer(client_models)
+
+ self.service = ServiceOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.file_system = FileSystemOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.path = PathOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+
+ async def close(self):
+ await self._client.close()
+ async def __aenter__(self):
+ await self._client.__aenter__()
+ return self
+ async def __aexit__(self, *exc_details):
+ await self._client.__aexit__(*exc_details)
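A rough usage sketch for the generated async client, which is normally driven by the extension's higher-level wrappers rather than called directly. The account URL, filesystem and path names below are hypothetical, and authorization is assumed to come from a SAS token already embedded in the URL, since this configuration variant sets no authentication policy.

```python
# Hedged sketch: the url is expected to already address the target resource
# and to carry authorization (e.g. a SAS token); all names are illustrative.
import asyncio

async def main():
    async with DataLakeStorageClient(
        url="https://myadls.dfs.core.windows.net/myfilesystem?sv=...",
        file_system="myfilesystem",
        path1="dir1/test.txt",
    ) as client:
        # Operation groups are attached as attributes in __init__.
        await client.file_system.get_properties()

asyncio.run(main())
```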
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations/__init__.py
new file mode 100644
index 00000000000..0db71e00342
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations/__init__.py
@@ -0,0 +1,17 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from ._service_operations import ServiceOperations
+from ._file_system_operations import FileSystemOperations
+from ._path_operations import PathOperations
+
+__all__ = [
+ 'ServiceOperations',
+ 'FileSystemOperations',
+ 'PathOperations',
+]
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations/_file_system_operations.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations/_file_system_operations.py
new file mode 100644
index 00000000000..d4e206a6d05
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations/_file_system_operations.py
@@ -0,0 +1,631 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
+import warnings
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
+
+from ... import models as _models
+
+T = TypeVar('T')
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+class FileSystemOperations:
+ """FileSystemOperations async operations.
+
+ You should not instantiate this class directly. Instead, you should create a Client instance that
+ instantiates it for you and attaches it as an attribute.
+
+ :ivar models: Alias to model classes used in this operation group.
+ :type models: ~azure.storage.filedatalake.models
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ """
+
+ models = _models
+
+ def __init__(self, client, config, serializer, deserializer) -> None:
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+ self._config = config
+
+ async def create(
+ self,
+ request_id_parameter: Optional[str] = None,
+ timeout: Optional[int] = None,
+ properties: Optional[str] = None,
+ **kwargs
+ ) -> None:
+ """Create FileSystem.
+
+ Create a FileSystem rooted at the specified location. If the FileSystem already exists, the
+ operation fails. This operation does not support conditional HTTP requests.
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+ format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+ is a base64 encoded string. Note that the string may only contain ASCII characters in the
+ ISO-8859-1 character set. If the filesystem exists, any properties not included in the list
+ will be removed. All properties are removed if the header is omitted. To merge new and
+ existing properties, first get all existing properties and the current E-Tag, then make a
+ conditional request with the E-Tag and include values for all properties.
+ :type properties: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ accept = "application/json"
+
+ # Construct URL
+ url = self.create.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ create.metadata = {'url': '/{filesystem}'} # type: ignore
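As a hedged follow-on to the client sketch above, creating a filesystem is a single call; the properties value is a name/value pair whose value is base64-encoded ("ZGVtbw==" encodes "demo"), per the docstring.

```python
# Hedged sketch: returns None on HTTP 201, raises HttpResponseError otherwise.
async def create_filesystem(client):
    await client.file_system.create(
        timeout=30,
        properties="project=ZGVtbw==",  # values must be base64-encoded
    )
```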
+
+ async def set_properties(
+ self,
+ request_id_parameter: Optional[str] = None,
+ timeout: Optional[int] = None,
+ properties: Optional[str] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ **kwargs
+ ) -> None:
+ """Set FileSystem Properties.
+
+ Set properties for the FileSystem. This operation supports conditional HTTP requests. For
+ more information, see `Specifying Conditional Headers for Blob Service Operations
+ `_.
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+ format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+ is a base64 encoded string. Note that the string may only contain ASCII characters in the
+ ISO-8859-1 character set. If the filesystem exists, any properties not included in the list
+ will be removed. All properties are removed if the header is omitted. To merge new and
+ existing properties, first get all existing properties and the current E-Tag, then make a
+ conditional request with the E-Tag and include values for all properties.
+ :type properties: str
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if modified_access_conditions is not None:
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ accept = "application/json"
+
+ # Construct URL
+ url = self.set_properties.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.patch(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ set_properties.metadata = {'url': '/{filesystem}'} # type: ignore
+
+ async def get_properties(
+ self,
+ request_id_parameter: Optional[str] = None,
+ timeout: Optional[int] = None,
+ **kwargs
+ ) -> None:
+ """Get FileSystem Properties.
+
+ All system and user-defined filesystem properties are specified in the response headers.
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ accept = "application/json"
+
+ # Construct URL
+ url = self.get_properties.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.head(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties'))
+ response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ get_properties.metadata = {'url': '/{filesystem}'} # type: ignore
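Because get_properties returns None, header-only results such as x-ms-properties are reachable through the `cls` callback that every generated operation accepts; a minimal sketch, assuming a client built as in the earlier examples:

```python
# Hedged sketch: cls receives (pipeline_response, deserialized, response_headers)
# and its return value becomes the operation's return value.
async def read_filesystem_properties(client):
    def on_response(pipeline_response, deserialized, response_headers):
        return response_headers

    headers = await client.file_system.get_properties(cls=on_response)
    return headers.get('x-ms-properties'), headers.get('x-ms-namespace-enabled')
```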
+
+ async def delete(
+ self,
+ request_id_parameter: Optional[str] = None,
+ timeout: Optional[int] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ **kwargs
+ ) -> None:
+ """Delete FileSystem.
+
+ Marks the FileSystem for deletion. When a FileSystem is deleted, a FileSystem with the same
+ identifier cannot be created for at least 30 seconds. While the filesystem is being deleted,
+ attempts to create a filesystem with the same identifier will fail with status code 409
+ (Conflict), with the service returning additional error information indicating that the
+ filesystem is being deleted. All other operations, including operations on any files or
+ directories within the filesystem, will fail with status code 404 (Not Found) while the
+ filesystem is being deleted. This operation supports conditional HTTP requests. For more
+ information, see `Specifying Conditional Headers for Blob Service Operations
+ `_.
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if modified_access_conditions is not None:
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ accept = "application/json"
+
+ # Construct URL
+ url = self.delete.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.delete(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ delete.metadata = {'url': '/{filesystem}'} # type: ignore
+
+ def list_paths(
+ self,
+ recursive: bool,
+ request_id_parameter: Optional[str] = None,
+ timeout: Optional[int] = None,
+ continuation: Optional[str] = None,
+ path: Optional[str] = None,
+ max_results: Optional[int] = None,
+ upn: Optional[bool] = None,
+ **kwargs
+ ) -> AsyncIterable["_models.PathList"]:
+ """List Paths.
+
+ List FileSystem paths and their properties.
+
+ :param recursive: Required.
+ :type recursive: bool
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param continuation: Optional. When deleting a directory, the number of paths that are deleted
+ with each invocation is limited. If the number of paths to be deleted exceeds this limit, a
+ continuation token is returned in this response header. When a continuation token is returned
+ in the response, it must be specified in a subsequent invocation of the delete operation to
+ continue deleting the directory.
+ :type continuation: str
+ :param path: Optional. Filters results to paths within the specified directory. An error
+ occurs if the directory does not exist.
+ :type path: str
+ :param max_results: An optional value that specifies the maximum number of items to return. If
+ omitted or greater than 5,000, the response will include up to 5,000 items.
+ :type max_results: int
+ :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+ "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+ headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If
+ "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+ false. Note that group and application Object IDs are not translated because they do not have
+ unique friendly names.
+ :type upn: bool
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either PathList or the result of cls(response)
+ :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.models.PathList]
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["_models.PathList"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ accept = "application/json"
+
+ # TODO: change this once continuation/next_link autorest PR is merged
+ def prepare_request(next_link=None, cont_token=None):
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter",
+ request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version,
+ 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ if not next_link:
+ # Construct URL
+ url = self.list_paths.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource,
+ 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ # TODO: change this once continuation/next_link autorest PR is merged
+ if cont_token is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", cont_token, 'str')
+ if path is not None:
+ query_parameters['directory'] = self._serialize.query("path", path, 'str')
+ query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool')
+ if max_results is not None:
+ query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
+ if upn is not None:
+ query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
+
+ request = self._client.get(url, query_parameters, header_parameters)
+ else:
+ url = next_link
+ query_parameters = {} # type: Dict[str, Any]
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+ request = self._client.get(url, query_parameters, header_parameters)
+ return request
+
+ async def extract_data(pipeline_response):
+ # TODO: change this once continuation/next_link autorest PR is merged
+ try:
+ cont_token = pipeline_response.http_response.headers['x-ms-continuation']
+ except KeyError:
+ cont_token = None
+ deserialized = self._deserialize('PathList', pipeline_response)
+ list_of_elem = deserialized.paths
+ if cls:
+ list_of_elem = cls(list_of_elem)
+ return cont_token, AsyncList(list_of_elem)
+
+ # TODO: change this once continuation/next_link autorest PR is merged
+ async def get_next(cont_token=None):
+ cont_token = cont_token if not continuation else continuation
+ request = prepare_request(cont_token=cont_token)
+
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response, model=error)
+
+ return pipeline_response
+
+ return AsyncItemPaged(
+ get_next, extract_data
+ )
+ list_paths.metadata = {'url': '/{filesystem}'} # type: ignore
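list_paths hands back an AsyncItemPaged, so the x-ms-continuation handling above stays internal; a sketch of consuming it, assuming the Path model exposes name/is_directory/content_length as in the public azure-storage-file-datalake models:

```python
# Hedged sketch: iterate every path under the filesystem root.
async def print_all_paths(client):
    async for path in client.file_system.list_paths(recursive=True, upn=False):
        print(path.name, path.is_directory, path.content_length)
```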
+
+ async def list_blob_hierarchy_segment(
+ self,
+ prefix: Optional[str] = None,
+ delimiter: Optional[str] = None,
+ marker: Optional[str] = None,
+ max_results: Optional[int] = None,
+ include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None,
+ showonly: Optional[str] = "deleted",
+ timeout: Optional[int] = None,
+ request_id_parameter: Optional[str] = None,
+ **kwargs
+ ) -> "_models.ListBlobsHierarchySegmentResponse":
+ """The List Blobs operation returns a list of the blobs under the specified container.
+
+ :param prefix: Filters results to filesystems within the specified prefix.
+ :type prefix: str
+ :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix
+ element in the response body that acts as a placeholder for all blobs whose names begin with
+ the same substring up to the appearance of the delimiter character. The delimiter may be a
+ single character or a string.
+ :type delimiter: str
+ :param marker: A string value that identifies the portion of the list of containers to be
+ returned with the next listing operation. The operation returns the NextMarker value within the
+ response body if the listing operation did not return all containers remaining to be listed
+ with the current page. The NextMarker value can be used as the value for the marker parameter
+ in a subsequent call to request the next page of list items. The marker value is opaque to the
+ client.
+ :type marker: str
+ :param max_results: An optional value that specifies the maximum number of items to return. If
+ omitted or greater than 5,000, the response will include up to 5,000 items.
+ :type max_results: int
+ :param include: Include this parameter to specify one or more datasets to include in the
+ response.
+ :type include: list[str or ~azure.storage.filedatalake.models.ListBlobsIncludeItem]
+ :param showonly: Include this parameter to specify one or more datasets to include in the
+ response.
+ :type showonly: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: ListBlobsHierarchySegmentResponse, or the result of cls(response)
+ :rtype: ~azure.storage.filedatalake.models.ListBlobsHierarchySegmentResponse
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ restype = "container"
+ comp = "list"
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if prefix is not None:
+ query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+ if delimiter is not None:
+ query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str')
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if max_results is not None:
+ query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
+ if include is not None:
+ query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',')
+ if showonly is not None:
+ query_parameters['showonly'] = self._serialize.query("showonly", showonly, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+ list_blob_hierarchy_segment.metadata = {'url': '/{filesystem}'} # type: ignore
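Since showonly defaults to "deleted", this is the operation that surfaces soft-deleted entries; a hedged sketch, assuming the response exposes segment.blob_items in the same shape as the blob service's ListBlobsHierarchySegmentResponse:

```python
# Hedged sketch: list soft-deleted entries under a directory prefix.
async def list_deleted_paths(client):
    response = await client.file_system.list_blob_hierarchy_segment(
        prefix="dir1/",
        delimiter="/",
        include=["deleted"],
        showonly="deleted",
        max_results=1000,
    )
    for blob in response.segment.blob_items:
        print(blob.name)
```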
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations/_path_operations.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations/_path_operations.py
new file mode 100644
index 00000000000..7cec5890e12
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations/_path_operations.py
@@ -0,0 +1,1773 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union
+import warnings
+
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
+
+from ... import models as _models
+
+T = TypeVar('T')
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+class PathOperations:
+ """PathOperations async operations.
+
+ You should not instantiate this class directly. Instead, you should create a Client instance that
+ instantiates it for you and attaches it as an attribute.
+
+ :ivar models: Alias to model classes used in this operation group.
+ :type models: ~azure.storage.filedatalake.models
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ """
+
+ models = _models
+
+ def __init__(self, client, config, serializer, deserializer) -> None:
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+ self._config = config
+
+ async def create(
+ self,
+ request_id_parameter: Optional[str] = None,
+ timeout: Optional[int] = None,
+ resource: Optional[Union[str, "_models.PathResourceType"]] = None,
+ continuation: Optional[str] = None,
+ mode: Optional[Union[str, "_models.PathRenameMode"]] = None,
+ rename_source: Optional[str] = None,
+ source_lease_id: Optional[str] = None,
+ properties: Optional[str] = None,
+ permissions: Optional[str] = None,
+ umask: Optional[str] = None,
+ path_http_headers: Optional["_models.PathHTTPHeaders"] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None,
+ **kwargs
+ ) -> None:
+ """Create File | Create Directory | Rename File | Rename Directory.
+
+ Create or rename a file or directory. By default, the destination is overwritten and if the
+ destination already exists and has a lease the lease is broken. This operation supports
+ conditional HTTP requests. For more information, see `Specifying Conditional Headers for Blob
+ Service Operations `_. To fail if the destination already exists,
+ use a conditional request with If-None-Match: "*".
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param resource: Required only for Create File and Create Directory. The value must be "file"
+ or "directory".
+ :type resource: str or ~azure.storage.filedatalake.models.PathResourceType
+ :param continuation: Optional. When deleting a directory, the number of paths that are deleted
+ with each invocation is limited. If the number of paths to be deleted exceeds this limit, a
+ continuation token is returned in this response header. When a continuation token is returned
+ in the response, it must be specified in a subsequent invocation of the delete operation to
+ continue deleting the directory.
+ :type continuation: str
+ :param mode: Optional. Valid only when namespace is enabled. This parameter determines the
+ behavior of the rename operation. The value must be "legacy" or "posix", and the default value
+ will be "posix".
+ :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode
+ :param rename_source: An optional file or directory to be renamed. The value must have the
+ following format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties
+ will overwrite the existing properties; otherwise, the existing properties will be preserved.
+ This value must be a URL percent-encoded string. Note that the string may only contain ASCII
+ characters in the ISO-8859-1 character set.
+ :type rename_source: str
+ :param source_lease_id: A lease ID for the source path. If specified, the source path must have
+ an active lease and the lease ID must match.
+ :type source_lease_id: str
+ :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+ format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+ is a base64 encoded string. Note that the string may only contain ASCII characters in the
+ ISO-8859-1 character set. If the filesystem exists, any properties not included in the list
+ will be removed. All properties are removed if the header is omitted. To merge new and
+ existing properties, first get all existing properties and the current E-Tag, then make a
+ conditional request with the E-Tag and include values for all properties.
+ :type properties: str
+ :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+ account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+ Each class may be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+ :type permissions: str
+ :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL, the umask
+ restricts the permissions of the file or directory to be created. The resulting permission is
+ given by p bitwise and not u, where p is the permission and u is the umask. For example, if p
+ is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777
+ for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ in 4-digit octal notation (e.g. 0766).
+ :type umask: str
+ :param path_http_headers: Parameter group.
+ :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :param source_modified_access_conditions: Parameter group.
+ :type source_modified_access_conditions: ~azure.storage.filedatalake.models.SourceModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _cache_control = None
+ _content_encoding = None
+ _content_language = None
+ _content_disposition = None
+ _content_type = None
+ _lease_id = None
+ _if_match = None
+ _if_none_match = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ _source_if_match = None
+ _source_if_none_match = None
+ _source_if_modified_since = None
+ _source_if_unmodified_since = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if path_http_headers is not None:
+ _cache_control = path_http_headers.cache_control
+ _content_encoding = path_http_headers.content_encoding
+ _content_language = path_http_headers.content_language
+ _content_disposition = path_http_headers.content_disposition
+ _content_type = path_http_headers.content_type
+ if source_modified_access_conditions is not None:
+ _source_if_match = source_modified_access_conditions.source_if_match
+ _source_if_none_match = source_modified_access_conditions.source_if_none_match
+ _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ accept = "application/json"
+
+ # Construct URL
+ url = self.create.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if resource is not None:
+ query_parameters['resource'] = self._serialize.query("resource", resource, 'str')
+ if continuation is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+ if mode is not None:
+ query_parameters['mode'] = self._serialize.query("mode", mode, 'str')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if _cache_control is not None:
+ header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str')
+ if _content_encoding is not None:
+ header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str')
+ if _content_language is not None:
+ header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str')
+ if _content_disposition is not None:
+ header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str')
+ if _content_type is not None:
+ header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str')
+ if rename_source is not None:
+ header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if source_lease_id is not None:
+ header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str')
+ if properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
+ if permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str')
+ if umask is not None:
+ header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str')
+ if _if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
+ if _if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ if _source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str')
+ if _source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str')
+ if _source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123')
+ if _source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation'))
+ response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
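A hedged sketch of creating a directory with this operation; the target is the path the client's URL and path1 were constructed for, and permissions/umask only take effect on accounts with Hierarchical Namespace enabled.

```python
# Hedged sketch: create the directory the client was constructed for.
async def create_directory(client):
    await client.path.create(
        resource="directory",
        umask="0027",  # applied only when the parent has no default ACL
    )
```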
+
+ async def update(
+ self,
+ action: Union[str, "_models.PathUpdateAction"],
+ mode: Union[str, "_models.PathSetAccessControlRecursiveMode"],
+ body: IO,
+ request_id_parameter: Optional[str] = None,
+ timeout: Optional[int] = None,
+ max_records: Optional[int] = None,
+ continuation: Optional[str] = None,
+ force_flag: Optional[bool] = None,
+ position: Optional[int] = None,
+ retain_uncommitted_data: Optional[bool] = None,
+ close: Optional[bool] = None,
+ content_length: Optional[int] = None,
+ properties: Optional[str] = None,
+ owner: Optional[str] = None,
+ group: Optional[str] = None,
+ permissions: Optional[str] = None,
+ acl: Optional[str] = None,
+ path_http_headers: Optional["_models.PathHTTPHeaders"] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ **kwargs
+ ) -> Optional["_models.SetAccessControlRecursiveResponse"]:
+ """Append Data | Flush Data | Set Properties | Set Access Control.
+
+ Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file,
+ sets properties for a file or directory, or sets access control for a file or directory. Data
+ can only be appended to a file. Concurrent writes to the same file using multiple clients are
+ not supported. This operation supports conditional HTTP requests. For more information, see
+ `Specifying Conditional Headers for Blob Service Operations `_.
+
+ :param action: The action must be "append" to upload data to be appended to a file, "flush" to
+ flush previously uploaded data to a file, "setProperties" to set the properties of a file or
+ directory, "setAccessControl" to set the owner, group, permissions, or access control list for
+ a file or directory, or "setAccessControlRecursive" to set the access control list for a
+ directory recursively. Note that Hierarchical Namespace must be enabled for the account in
+ order to use access control. Also note that the Access Control List (ACL) includes permissions
+ for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers
+ are mutually exclusive.
+ :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction
+ :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify"
+ modifies one or more POSIX access control rights that pre-exist on files and directories,
+ "remove" removes one or more POSIX access control rights that were present earlier on files
+ and directories.
+ :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+ :param body: Initial data.
+ :type body: IO
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param max_records: Optional. Valid for "SetAccessControlRecursive" operation. It specifies the
+ maximum number of files or directories on which the acl change will be applied. If omitted or
+ greater than 2,000, the request will process up to 2,000 items.
+ :type max_records: int
+ :param continuation: Optional. The number of paths processed with each invocation is limited.
+ If the number of paths to be processed exceeds this limit, a continuation token is returned in
+ the response header x-ms-continuation. When a continuation token is returned in the response,
+        it must be percent-encoded and specified in a subsequent invocation of the setAccessControlRecursive
+ operation.
+ :type continuation: str
+ :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false,
+ the operation will terminate quickly on encountering user errors (4XX). If true, the operation
+ will ignore user errors and proceed with the operation on other sub-entities of the directory.
+        A continuation token will only be returned when forceFlag is true in case of user errors. If not
+        set, the default value is false.
+ :type force_flag: bool
+ :param position: This parameter allows the caller to upload data in parallel and control the
+ order in which it is appended to the file. It is required when uploading data to be appended
+ to the file and when flushing previously uploaded data to the file. The value must be the
+ position where the data is to be appended. Uploaded data is not immediately flushed, or
+ written, to the file. To flush, the previously uploaded data must be contiguous, the position
+ parameter must be specified and equal to the length of the file after all data has been
+ written, and there must not be a request entity body included with the request.
+ :type position: long
+ :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data
+ is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+ after the flush operation. The default is false. Data at offsets less than the specified
+ position are written to the file when flush succeeds, but this optional parameter allows data
+ after the flush position to be retained for a future flush operation.
+ :type retain_uncommitted_data: bool
+ :param close: Azure Storage Events allow applications to receive notifications when files
+ change. When Azure Storage Events are enabled, a file changed event is raised. This event has a
+ property indicating whether this is the final change, to distinguish between an intermediate
+ flush to a file stream and the final close of a file stream. The close query parameter is valid
+ only when the action is "flush" and change notifications are enabled. If the value of close is
+ "true" and the flush operation completes successfully, the service raises a file change
+ notification with a property indicating that this is the final update (the file stream has been
+ closed). If "false", a change notification is raised indicating the file has changed. The
+ default is false. This query parameter is set to true by the Hadoop ABFS driver to indicate
+ that the file stream has been closed.
+ :type close: bool
+ :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush
+ Data". Must be the length of the request content in bytes for "Append Data".
+ :type content_length: long
+ :param properties: Optional. User-defined properties to be stored with the file or directory,
+ in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each
+ value is a base64 encoded string. Note that the string may only contain ASCII characters in the
+ ISO-8859-1 character set. If the file or directory exists, any properties not included in the
+ list will be removed. All properties are removed if the header is omitted. To merge new and
+ existing properties, first get all existing properties and the current E-Tag, then make a
+ conditional request with the E-Tag and include values for all properties.
+ :type properties: str
+ :param owner: Optional. The owner of the blob or directory.
+ :type owner: str
+ :param group: Optional. The owning group of the blob or directory.
+ :type group: str
+ :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+ account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+ Each class may be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+ :type permissions: str
+ :param acl: Sets POSIX access control rights on files and directories. The value is a comma-
+ separated list of access control entries. Each access control entry (ACE) consists of a scope,
+ a type, a user or group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :param path_http_headers: Parameter group.
+ :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: SetAccessControlRecursiveResponse, or the result of cls(response)
+ :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SetAccessControlRecursiveResponse"]]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _content_md5 = None
+ _lease_id = None
+ _cache_control = None
+ _content_type = None
+ _content_disposition = None
+ _content_encoding = None
+ _content_language = None
+ _if_match = None
+ _if_none_match = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if path_http_headers is not None:
+ _content_md5 = path_http_headers.content_md5
+ _cache_control = path_http_headers.cache_control
+ _content_type = path_http_headers.content_type
+ _content_disposition = path_http_headers.content_disposition
+ _content_encoding = path_http_headers.content_encoding
+ _content_language = path_http_headers.content_language
+ content_type = kwargs.pop("content_type", "application/octet-stream")
+ accept = "application/json"
+
+ # Construct URL
+ url = self.update.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+ if max_records is not None:
+ query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1)
+ if continuation is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+ query_parameters['mode'] = self._serialize.query("mode", mode, 'str')
+ if force_flag is not None:
+ query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool')
+ if position is not None:
+ query_parameters['position'] = self._serialize.query("position", position, 'long')
+ if retain_uncommitted_data is not None:
+ query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool')
+ if close is not None:
+ query_parameters['close'] = self._serialize.query("close", close, 'bool')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if content_length is not None:
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0)
+ if _content_md5 is not None:
+ header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if _cache_control is not None:
+ header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str')
+ if _content_type is not None:
+ header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str')
+ if _content_disposition is not None:
+ header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str')
+ if _content_encoding is not None:
+ header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str')
+ if _content_language is not None:
+ header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str')
+ if properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
+ if owner is not None:
+ header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
+ if group is not None:
+ header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
+ if permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str')
+ if acl is not None:
+ header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str')
+ if _if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
+ if _if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ body_content_kwargs = {} # type: Dict[str, Any]
+ body_content_kwargs['stream_content'] = body
+ request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ deserialized = None
+ if response.status_code == 200:
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges'))
+ response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control'))
+ response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition'))
+ response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding'))
+ response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language'))
+ response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
+ response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range'))
+ response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
+ response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5'))
+ response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties'))
+ response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response)
+
+ if response.status_code == 202:
+ response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+ update.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
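+ # Hedged usage sketch (assumption: the public azure.storage.filedatalake.aio
+ # wrapper layer sits on top of this generated `update` operation; it is not
+ # shown in this diff). An append followed by a flush at the final position,
+ # as described in the docstring above, could look like:
+ #
+ #     from azure.storage.filedatalake.aio import DataLakeFileClient
+ #
+ #     async def append_then_flush(account_url, credential):
+ #         async with DataLakeFileClient(account_url, "myfilesystem", "test",
+ #                                       credential=credential) as file_client:
+ #             data = b"hello, adls"
+ #             await file_client.append_data(data, offset=0, length=len(data))
+ #             # the flush position must equal the total length written so far
+ #             await file_client.flush_data(len(data))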
+
+ async def lease(
+ self,
+ x_ms_lease_action: Union[str, "_models.PathLeaseAction"],
+ request_id_parameter: Optional[str] = None,
+ timeout: Optional[int] = None,
+ x_ms_lease_duration: Optional[int] = None,
+ x_ms_lease_break_period: Optional[int] = None,
+ proposed_lease_id: Optional[str] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ **kwargs
+ ) -> None:
+ """Lease Path.
+
+ Create and manage a lease to restrict write and delete access to the path. This operation
+ supports conditional HTTP requests. For more information, see `Specifying Conditional Headers
+ for Blob Service Operations `_.
+
+ :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew",
+ and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration"
+ to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the
+ lease break period is allowed to elapse, during which time no lease operation except break and
+ release can be performed on the file. When a lease is successfully broken, the response
+ indicates the interval in seconds until a new lease can be acquired. Use "change" and specify
+ the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to
+ change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an
+ existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease.
+ :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies
+ the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or
+ -1 for infinite lease.
+ :type x_ms_lease_duration: int
+ :param x_ms_lease_break_period: The lease break period duration is optional to break a lease,
+ and specifies the break period of the lease in seconds. The lease break duration must be
+ between 0 and 60 seconds.
+ :type x_ms_lease_break_period: int
+ :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+ 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+ Constructor (String) for a list of valid GUID string formats.
+ :type proposed_lease_id: str
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _lease_id = None
+ _if_match = None
+ _if_none_match = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ accept = "application/json"
+
+ # Construct URL
+ url = self.lease.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'str')
+ if x_ms_lease_duration is not None:
+ header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", x_ms_lease_duration, 'int')
+ if x_ms_lease_break_period is not None:
+ header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if proposed_lease_id is not None:
+ header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
+ if _if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
+ if _if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.post(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id'))
+
+ if response.status_code == 201:
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id'))
+
+ if response.status_code == 202:
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-lease-time']=self._deserialize('str', response.headers.get('x-ms-lease-time'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ lease.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
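+ # Hedged usage sketch (assumption: the public azure.storage.filedatalake.aio
+ # wrapper exposes this lease operation via acquire_lease / DataLakeLeaseClient).
+ # Acquiring and releasing a 15-second lease on a file could look like:
+ #
+ #     async def hold_lease(file_client):
+ #         # file_client: azure.storage.filedatalake.aio.DataLakeFileClient
+ #         lease = await file_client.acquire_lease(lease_duration=15)
+ #         try:
+ #             ...  # writes guarded by the lease id
+ #         finally:
+ #             await lease.release()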
+
+ async def read(
+ self,
+ request_id_parameter: Optional[str] = None,
+ timeout: Optional[int] = None,
+ range: Optional[str] = None,
+ x_ms_range_get_content_md5: Optional[bool] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ **kwargs
+ ) -> IO:
+ """Read File.
+
+ Read the contents of a file. For read operations, range requests are supported. This operation
+ supports conditional HTTP requests. For more information, see `Specifying Conditional Headers
+ for Blob Service Operations `_.
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param range: The HTTP Range request header specifies one or more byte ranges of the resource
+ to be retrieved.
+ :type range: str
+ :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified
+ together with the Range header, the service returns the MD5 hash for the range, as long as the
+ range is less than or equal to 4MB in size. If this header is specified without the Range
+ header, the service returns status code 400 (Bad Request). If this header is set to true when
+ the range exceeds 4 MB in size, the service returns status code 400 (Bad Request).
+ :type x_ms_range_get_content_md5: bool
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: IO, or the result of cls(response)
+ :rtype: IO
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[IO]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _lease_id = None
+ _if_match = None
+ _if_none_match = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ accept = "application/json"
+
+ # Construct URL
+ url = self.read.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if range is not None:
+ header_parameters['Range'] = self._serialize.header("range", range, 'str')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if x_ms_range_get_content_md5 is not None:
+ header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool')
+ if _if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
+ if _if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 206]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges'))
+ response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control'))
+ response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition'))
+ response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding'))
+ response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language'))
+ response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
+ response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range'))
+ response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
+ response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type'))
+ response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties'))
+ response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration'))
+ response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state'))
+ response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status'))
+ deserialized = response.stream_download(self._client._pipeline)
+
+ if response.status_code == 206:
+ response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges'))
+ response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control'))
+ response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition'))
+ response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding'))
+ response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language'))
+ response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
+ response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range'))
+ response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
+ response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5'))
+ response_headers['x-ms-content-md5']=self._deserialize('str', response.headers.get('x-ms-content-md5'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type'))
+ response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties'))
+ response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration'))
+ response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state'))
+ response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status'))
+ deserialized = response.stream_download(self._client._pipeline)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+ read.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
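+ # Hedged usage sketch (assumption: the public aio wrapper maps ranged reads to
+ # the Range header built above). Downloading the first kilobyte of a file could
+ # look like:
+ #
+ #     async def read_first_kilobyte(file_client):
+ #         # file_client: azure.storage.filedatalake.aio.DataLakeFileClient
+ #         downloader = await file_client.download_file(offset=0, length=1024)
+ #         return await downloader.readall()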
+
+ async def get_properties(
+ self,
+ request_id_parameter: Optional[str] = None,
+ timeout: Optional[int] = None,
+ action: Optional[Union[str, "_models.PathGetPropertiesAction"]] = None,
+ upn: Optional[bool] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ **kwargs
+ ) -> None:
+ """Get Properties | Get Status | Get Access Control List.
+
+ Get Properties returns all system and user defined properties for a path. Get Status returns
+ all system defined properties for a path. Get Access Control List returns the access control
+ list for a path. This operation supports conditional HTTP requests. For more information, see
+ `Specifying Conditional Headers for Blob Service Operations `_.
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param action: Optional. If the value is "getStatus", only the system defined properties for
+ the path are returned. If the value is "getAccessControl", the access control list is returned
+ in the response headers (Hierarchical Namespace must be enabled for the account); otherwise the
+ properties are returned.
+ :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction
+ :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+ "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+ headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If
+ "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+ false. Note that group and application Object IDs are not translated because they do not have
+ unique friendly names.
+ :type upn: bool
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _lease_id = None
+ _if_match = None
+ _if_none_match = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ accept = "application/json"
+
+ # Construct URL
+ url = self.get_properties.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if action is not None:
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+ if upn is not None:
+ query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if _if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
+ if _if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.head(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges'))
+ response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control'))
+ response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition'))
+ response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding'))
+ response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language'))
+ response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
+ response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range'))
+ response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
+ response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type'))
+ response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties'))
+ response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner'))
+ response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group'))
+ response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions'))
+ response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl'))
+ response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration'))
+ response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state'))
+ response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ get_properties.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
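+ # Hedged usage sketch (assumption: the public aio wrapper surfaces the
+ # "getAccessControl" action and the x-ms-owner/x-ms-group/x-ms-acl headers
+ # parsed above). Reading a path's ACL with user principal names could look like:
+ #
+ #     async def show_acl(file_client):
+ #         # file_client: azure.storage.filedatalake.aio.DataLakeFileClient
+ #         access = await file_client.get_access_control(upn=True)
+ #         return access["owner"], access["group"], access["acl"]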
+
+ async def delete(
+ self,
+ request_id_parameter: Optional[str] = None,
+ timeout: Optional[int] = None,
+ recursive: Optional[bool] = None,
+ continuation: Optional[str] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ **kwargs
+ ) -> None:
+ """Delete File | Delete Directory.
+
+ Delete the file or directory. This operation supports conditional HTTP requests. For more
+ information, see `Specifying Conditional Headers for Blob Service Operations
+ `_.
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param recursive: Required when deleting a directory. If "true", all paths beneath the
+ directory will be deleted; if "false" and the directory is non-empty, an error occurs.
+ :type recursive: bool
+ :param continuation: Optional. When deleting a directory, the number of paths that are deleted
+ with each invocation is limited. If the number of paths to be deleted exceeds this limit, a
+ continuation token is returned in this response header. When a continuation token is returned
+ in the response, it must be specified in a subsequent invocation of the delete operation to
+ continue deleting the directory.
+ :type continuation: str
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _lease_id = None
+ _if_match = None
+ _if_none_match = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ accept = "application/json"
+
+ # Construct URL
+ url = self.delete.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if recursive is not None:
+ query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool')
+ if continuation is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if _if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
+ if _if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.delete(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation'))
+ response_headers['x-ms-deletion-id']=self._deserialize('str', response.headers.get('x-ms-deletion-id'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
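+ # Hedged usage sketch (assumption: the public aio wrapper drives this delete
+ # operation, passing `recursive` for directories and following x-ms-continuation
+ # as needed). Removing a directory tree could look like:
+ #
+ #     async def drop_directory(directory_client):
+ #         # directory_client: azure.storage.filedatalake.aio.DataLakeDirectoryClient
+ #         await directory_client.delete_directory()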
+
+ async def set_access_control(
+ self,
+ timeout: Optional[int] = None,
+ owner: Optional[str] = None,
+ group: Optional[str] = None,
+ permissions: Optional[str] = None,
+ acl: Optional[str] = None,
+ request_id_parameter: Optional[str] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ **kwargs
+ ) -> None:
+ """Set the owner, group, permissions, or access control list for a path.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param owner: Optional. The owner of the blob or directory.
+ :type owner: str
+ :param group: Optional. The owning group of the blob or directory.
+ :type group: str
+ :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+ account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+ Each class may be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+ :type permissions: str
+ :param acl: Sets POSIX access control rights on files and directories. The value is a comma-
+ separated list of access control entries. Each access control entry (ACE) consists of a scope,
+ a type, a user or group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _lease_id = None
+ _if_match = None
+ _if_none_match = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ action = "setAccessControl"
+ accept = "application/json"
+
+ # Construct URL
+ url = self.set_access_control.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if owner is not None:
+ header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
+ if group is not None:
+ header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
+ if permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str')
+ if acl is not None:
+ header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str')
+ if _if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
+ if _if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.patch(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
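+ # Hedged usage sketch (assumption: the public aio wrapper exposes this
+ # operation as set_access_control on path clients). Setting POSIX permissions
+ # on a single file could look like:
+ #
+ #     async def tighten_permissions(file_client):
+ #         # file_client: azure.storage.filedatalake.aio.DataLakeFileClient
+ #         await file_client.set_access_control(permissions="rwxr-x---")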
+
+ async def set_access_control_recursive(
+ self,
+ mode: Union[str, "_models.PathSetAccessControlRecursiveMode"],
+ timeout: Optional[int] = None,
+ continuation: Optional[str] = None,
+ force_flag: Optional[bool] = None,
+ max_records: Optional[int] = None,
+ acl: Optional[str] = None,
+ request_id_parameter: Optional[str] = None,
+ **kwargs
+ ) -> "_models.SetAccessControlRecursiveResponse":
+ """Set the access control list for a path and subpaths.
+
+ :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify"
+ modifies one or more POSIX access control rights that pre-exist on files and directories,
+ "remove" removes one or more POSIX access control rights that were present earlier on files
+ and directories.
+ :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param continuation: Optional. The number of paths processed with each invocation is limited.
+ If the number of paths to be processed exceeds this limit, a continuation token is returned in
+ the response header x-ms-continuation. When a continuation token is returned, it must be
+ specified in a subsequent invocation of the setAccessControlRecursive operation to continue the
+ operation.
+ :type continuation: str
+ :param force_flag: Optional. Valid for the "SetAccessControlRecursive" operation. If set to false,
+ the operation terminates quickly on encountering user errors (4XX). If true, the operation
+ ignores user errors and proceeds with the operation on other sub-entities of the directory. A
+ continuation token is returned only when forceFlag is true and user errors are encountered. If
+ not set, the default value is false.
+ :type force_flag: bool
+ :param max_records: Optional. It specifies the maximum number of files or directories on which
+ the acl change will be applied. If omitted or greater than 2,000, the request will process up
+ to 2,000 items.
+ :type max_records: int
+ :param acl: Sets POSIX access control rights on files and directories. The value is a comma-
+ separated list of access control entries. Each access control entry (ACE) consists of a scope,
+ a type, a user or group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: SetAccessControlRecursiveResponse, or the result of cls(response)
+ :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["_models.SetAccessControlRecursiveResponse"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ action = "setAccessControlRecursive"
+ accept = "application/json"
+
+ # Construct URL
+ url = self.set_access_control_recursive.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if continuation is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+ query_parameters['mode'] = self._serialize.query("mode", mode, 'str')
+ if force_flag is not None:
+ query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool')
+ if max_records is not None:
+ query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if acl is not None:
+ header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.patch(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+ set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
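+ # Hedged usage sketch (assumption: the public aio wrapper exposes this paged
+ # operation as set_access_control_recursive on directory clients and follows
+ # x-ms-continuation internally). Applying an ACL to a directory and everything
+ # beneath it could look like:
+ #
+ #     async def grant_group_read(directory_client):
+ #         # directory_client: azure.storage.filedatalake.aio.DataLakeDirectoryClient
+ #         acl = "group::r-x,default:group::r-x"
+ #         result = await directory_client.set_access_control_recursive(acl=acl)
+ #         return result.counters.files_successful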
+
+ async def flush_data(
+ self,
+ timeout: Optional[int] = None,
+ position: Optional[int] = None,
+ retain_uncommitted_data: Optional[bool] = None,
+ close: Optional[bool] = None,
+ content_length: Optional[int] = None,
+ request_id_parameter: Optional[str] = None,
+ path_http_headers: Optional["_models.PathHTTPHeaders"] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ **kwargs
+ ) -> None:
+        """Flush previously uploaded data to the file.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param position: This parameter allows the caller to upload data in parallel and control the
+ order in which it is appended to the file. It is required when uploading data to be appended
+ to the file and when flushing previously uploaded data to the file. The value must be the
+ position where the data is to be appended. Uploaded data is not immediately flushed, or
+ written, to the file. To flush, the previously uploaded data must be contiguous, the position
+ parameter must be specified and equal to the length of the file after all data has been
+ written, and there must not be a request entity body included with the request.
+ :type position: long
+ :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data
+ is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+ after the flush operation. The default is false. Data at offsets less than the specified
+ position are written to the file when flush succeeds, but this optional parameter allows data
+ after the flush position to be retained for a future flush operation.
+ :type retain_uncommitted_data: bool
+        :param close: Azure Storage Events allow applications to receive notifications when files
+         change. When Azure Storage Events are enabled, a file changed event is raised. This event
+         has a property indicating whether this is the final change to distinguish the difference
+         between an intermediate flush to a file stream and the final close of a file stream. The
+         close query parameter is valid only when the action is "flush" and change notifications
+         are enabled. If the value of close is "true" and the flush operation completes
+         successfully, the service raises a file change notification with a property indicating
+         that this is the final update (the file stream has been closed). If "false", a change
+         notification is raised indicating the file has changed. The default is false. This query
+         parameter is set to true by the Hadoop ABFS driver to indicate that the file stream has
+         been closed.
+ :type close: bool
+ :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush
+ Data". Must be the length of the request content in bytes for "Append Data".
+ :type content_length: long
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param path_http_headers: Parameter group.
+ :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _content_md5 = None
+ _lease_id = None
+ _cache_control = None
+ _content_type = None
+ _content_disposition = None
+ _content_encoding = None
+ _content_language = None
+ _if_match = None
+ _if_none_match = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if path_http_headers is not None:
+ _content_md5 = path_http_headers.content_md5
+ _cache_control = path_http_headers.cache_control
+ _content_type = path_http_headers.content_type
+ _content_disposition = path_http_headers.content_disposition
+ _content_encoding = path_http_headers.content_encoding
+ _content_language = path_http_headers.content_language
+ action = "flush"
+ accept = "application/json"
+
+ # Construct URL
+ url = self.flush_data.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if position is not None:
+ query_parameters['position'] = self._serialize.query("position", position, 'long')
+ if retain_uncommitted_data is not None:
+ query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool')
+ if close is not None:
+ query_parameters['close'] = self._serialize.query("close", close, 'bool')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if content_length is not None:
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0)
+ if _content_md5 is not None:
+ header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if _cache_control is not None:
+ header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str')
+ if _content_type is not None:
+ header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str')
+ if _content_disposition is not None:
+ header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str')
+ if _content_encoding is not None:
+ header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str')
+ if _content_language is not None:
+ header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str')
+ if _if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
+ if _if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.patch(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ flush_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
+
+ async def append_data(
+ self,
+ body: IO,
+ position: Optional[int] = None,
+ timeout: Optional[int] = None,
+ content_length: Optional[int] = None,
+ transactional_content_crc64: Optional[bytearray] = None,
+ request_id_parameter: Optional[str] = None,
+ path_http_headers: Optional["_models.PathHTTPHeaders"] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ **kwargs
+ ) -> None:
+ """Append data to the file.
+
+ :param body: Initial data.
+ :type body: IO
+ :param position: This parameter allows the caller to upload data in parallel and control the
+ order in which it is appended to the file. It is required when uploading data to be appended
+ to the file and when flushing previously uploaded data to the file. The value must be the
+ position where the data is to be appended. Uploaded data is not immediately flushed, or
+ written, to the file. To flush, the previously uploaded data must be contiguous, the position
+ parameter must be specified and equal to the length of the file after all data has been
+ written, and there must not be a request entity body included with the request.
+ :type position: long
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush
+ Data". Must be the length of the request content in bytes for "Append Data".
+ :type content_length: long
+ :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+ validated by the service.
+ :type transactional_content_crc64: bytearray
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param path_http_headers: Parameter group.
+ :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _transactional_content_hash = None
+ _lease_id = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if path_http_headers is not None:
+ _transactional_content_hash = path_http_headers.transactional_content_hash
+ action = "append"
+ content_type = kwargs.pop("content_type", "application/json")
+ accept = "application/json"
+
+ # Construct URL
+ url = self.append_data.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+ if position is not None:
+ query_parameters['position'] = self._serialize.query("position", position, 'long')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if content_length is not None:
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0)
+ if _transactional_content_hash is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", _transactional_content_hash, 'bytearray')
+ if transactional_content_crc64 is not None:
+ header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ body_content_kwargs = {} # type: Dict[str, Any]
+ body_content_kwargs['stream_content'] = body
+ request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5'))
+ response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64'))
+ response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ append_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
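+
+    # Usage sketch (illustrative assumptions: `path_ops` client, in-memory payload):
+    # append a payload and then flush it. Per the docstrings above, flush_data takes
+    # the total length written as `position` and a `content_length` of 0.
+    #
+    #     import io
+    #     payload = b"hello, adls"
+    #     await path_ops.append_data(body=io.BytesIO(payload), position=0,
+    #                                content_length=len(payload))
+    #     await path_ops.flush_data(position=len(payload), content_length=0, close=True)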
+
+ async def set_expiry(
+ self,
+ expiry_options: Union[str, "_models.PathExpiryOptions"],
+ timeout: Optional[int] = None,
+ request_id_parameter: Optional[str] = None,
+ expires_on: Optional[str] = None,
+ **kwargs
+ ) -> None:
+ """Sets the time a blob will expire and be deleted.
+
+ :param expiry_options: Required. Indicates mode of the expiry time.
+ :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+        :param expires_on: The time at which the blob is set to expire.
+ :type expires_on: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ comp = "expiry"
+ accept = "application/json"
+
+ # Construct URL
+ url = self.set_expiry.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str')
+ if expires_on is not None:
+ header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ set_expiry.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
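+
+    # Usage sketch (illustrative): setting an absolute expiry time on a file. The
+    # "Absolute" option and the RFC-1123 timestamp format are assumptions based on
+    # PathExpiryOptions; `path_ops` is an already-configured client.
+    #
+    #     await path_ops.set_expiry(expiry_options="Absolute",
+    #                               expires_on="Tue, 01 Jun 2021 00:00:00 GMT")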
+
+ async def undelete(
+ self,
+ timeout: Optional[int] = None,
+ undelete_source: Optional[str] = None,
+ request_id_parameter: Optional[str] = None,
+ **kwargs
+ ) -> None:
+ """Undelete a path that was previously soft deleted.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+        :param undelete_source: Optional. Only for accounts with a hierarchical namespace enabled.
+         The path of the soft-deleted blob to undelete.
+ :type undelete_source: str
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ comp = "undelete"
+ accept = "application/json"
+
+ # Construct URL
+ url = self.undelete.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if undelete_source is not None:
+ header_parameters['x-ms-undelete-source'] = self._serialize.header("undelete_source", undelete_source, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ undelete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
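+
+    # Usage sketch (illustrative) for the soft-delete scenario this extension adds.
+    # The "<deleted-path>?deletionid=<version>" format for x-ms-undelete-source is an
+    # assumption; the exact format is defined by the service documentation.
+    #
+    #     await path_ops.undelete(
+    #         undelete_source="test?deletionid=132549163",
+    #         timeout=30)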
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations/_service_operations.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations/_service_operations.py
new file mode 100644
index 00000000000..f8ae878a37c
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations/_service_operations.py
@@ -0,0 +1,148 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
+import warnings
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
+
+from ... import models as _models
+
+T = TypeVar('T')
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+class ServiceOperations:
+ """ServiceOperations async operations.
+
+ You should not instantiate this class directly. Instead, you should create a Client instance that
+ instantiates it for you and attaches it as an attribute.
+
+ :ivar models: Alias to model classes used in this operation group.
+ :type models: ~azure.storage.filedatalake.models
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ """
+
+ models = _models
+
+ def __init__(self, client, config, serializer, deserializer) -> None:
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+ self._config = config
+
+ def list_file_systems(
+ self,
+ prefix: Optional[str] = None,
+ continuation: Optional[str] = None,
+ max_results: Optional[int] = None,
+ request_id_parameter: Optional[str] = None,
+ timeout: Optional[int] = None,
+ **kwargs
+ ) -> AsyncIterable["_models.FileSystemList"]:
+ """List FileSystems.
+
+        List filesystems and their properties in the given account.
+
+ :param prefix: Filters results to filesystems within the specified prefix.
+ :type prefix: str
+        :param continuation: Optional. The number of filesystems returned with each invocation is
+         limited. If there are more results, a continuation token is returned in the
+         x-ms-continuation response header. When a continuation token is returned in the response,
+         it must be specified in a subsequent invocation of the list operation to continue listing
+         the filesystems.
+ :type continuation: str
+ :param max_results: An optional value that specifies the maximum number of items to return. If
+ omitted or greater than 5,000, the response will include up to 5,000 items.
+ :type max_results: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either FileSystemList or the result of cls(response)
+ :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.models.FileSystemList]
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["_models.FileSystemList"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ resource = "account"
+ accept = "application/json"
+
+ def prepare_request(next_link=None):
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ if not next_link:
+ # Construct URL
+ url = self.list_file_systems.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['resource'] = self._serialize.query("resource", resource, 'str')
+ if prefix is not None:
+ query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+ if continuation is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+ if max_results is not None:
+ query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ request = self._client.get(url, query_parameters, header_parameters)
+ else:
+ url = next_link
+ query_parameters = {} # type: Dict[str, Any]
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+ request = self._client.get(url, query_parameters, header_parameters)
+ return request
+
+ async def extract_data(pipeline_response):
+ deserialized = self._deserialize('FileSystemList', pipeline_response)
+ list_of_elem = deserialized.filesystems
+ if cls:
+ list_of_elem = cls(list_of_elem)
+ return None, AsyncList(list_of_elem)
+
+ async def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response, model=error)
+
+ return pipeline_response
+
+ return AsyncItemPaged(
+ get_next, extract_data
+ )
+ list_file_systems.metadata = {'url': '/'} # type: ignore
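+
+    # Usage sketch (illustrative): the method returns an AsyncItemPaged, so callers
+    # consume it with `async for`; `service_ops` is an assumed ServiceOperations
+    # instance created by the client.
+    #
+    #     async for fs in service_ops.list_file_systems(prefix="myfilesystem"):
+    #         print(fs.name)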
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations_async/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations_async/__init__.py
new file mode 100644
index 00000000000..1190e524ea2
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations_async/__init__.py
@@ -0,0 +1,20 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._service_operations_async import ServiceOperations
+from ._file_system_operations_async import FileSystemOperations
+from ._path_operations_async import PathOperations
+
+__all__ = [
+ 'ServiceOperations',
+ 'FileSystemOperations',
+ 'PathOperations',
+]
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations_async/_file_system_operations_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations_async/_file_system_operations_async.py
new file mode 100644
index 00000000000..715b6a89dfc
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations_async/_file_system_operations_async.py
@@ -0,0 +1,580 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class FileSystemOperations:
+ """FileSystemOperations async operations.
+
+    You should not instantiate this class directly. Instead, create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ :ivar restype: . Constant value: "container".
+ :ivar comp: . Constant value: "list".
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer) -> None:
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+ self.restype = "container"
+ self.comp = "list"
+
+ async def create(self, properties=None, request_id=None, timeout=None, *, cls=None, **kwargs):
+ """Create FileSystem.
+
+ Create a FileSystem rooted at the specified location. If the FileSystem
+ already exists, the operation fails. This operation does not support
+ conditional HTTP requests.
+
+ :param properties: Optional. User-defined properties to be stored with
+ the filesystem, in the format of a comma-separated list of name and
+ value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
+ string. Note that the string may only contain ASCII characters in the
+ ISO-8859-1 character set. If the filesystem exists, any properties
+ not included in the list will be removed. All properties are removed
+ if the header is omitted. To merge new and existing properties, first
+ get all existing properties and the current E-Tag, then make a
+ conditional request with the E-Tag and include values for all
+ properties.
+ :type properties: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ # Construct URL
+ url = self.create.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ if properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ create.metadata = {'url': '/{filesystem}'}
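+
+    # Usage sketch (illustrative): creating a filesystem with a user-defined property
+    # in the comma-separated, base64-encoded-value format described above; `fs_ops`
+    # is an assumed FileSystemOperations instance ("ZGVtbw==" is base64 for "demo").
+    #
+    #     await fs_ops.create(properties="project=ZGVtbw==", timeout=30)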
+
+ async def set_properties(self, properties=None, request_id=None, timeout=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Set FileSystem Properties.
+
+ Set properties for the FileSystem. This operation supports conditional
+ HTTP requests. For more information, see [Specifying Conditional
+ Headers for Blob Service
+ Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+
+ :param properties: Optional. User-defined properties to be stored with
+ the filesystem, in the format of a comma-separated list of name and
+ value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
+ string. Note that the string may only contain ASCII characters in the
+ ISO-8859-1 character set. If the filesystem exists, any properties
+ not included in the list will be removed. All properties are removed
+ if the header is omitted. To merge new and existing properties, first
+ get all existing properties and the current E-Tag, then make a
+ conditional request with the E-Tag and include values for all
+ properties.
+ :type properties: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ # Construct URL
+ url = self.set_properties.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ if properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.patch(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_properties.metadata = {'url': '/{filesystem}'}
+
+ async def get_properties(self, request_id=None, timeout=None, *, cls=None, **kwargs):
+ """Get FileSystem Properties.
+
+ All system and user-defined filesystem properties are specified in the
+ response headers.
+
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ # Construct URL
+ url = self.get_properties.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+ # Construct and send request
+ request = self._client.head(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
+ 'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ get_properties.metadata = {'url': '/{filesystem}'}
+
+ async def delete(self, request_id=None, timeout=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Delete FileSystem.
+
+ Marks the FileSystem for deletion. When a FileSystem is deleted, a
+ FileSystem with the same identifier cannot be created for at least 30
+ seconds. While the filesystem is being deleted, attempts to create a
+ filesystem with the same identifier will fail with status code 409
+ (Conflict), with the service returning additional error information
+ indicating that the filesystem is being deleted. All other operations,
+ including operations on any files or directories within the filesystem,
+ will fail with status code 404 (Not Found) while the filesystem is
+ being deleted. This operation supports conditional HTTP requests. For
+ more information, see [Specifying Conditional Headers for Blob Service
+ Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ # Construct URL
+ url = self.delete.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.delete(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ delete.metadata = {'url': '/{filesystem}'}
+
+ async def list_paths(self, recursive, continuation=None, path=None, max_results=None, upn=None, request_id=None, timeout=None, *, cls=None, **kwargs):
+ """List Paths.
+
+ List FileSystem paths and their properties.
+
+        :param recursive: Required. If "true", all paths are listed
+         recursively; otherwise, only paths at the root of the filesystem (or
+         of the specified directory) are listed.
+        :type recursive: bool
+        :param continuation: Optional. The number of paths returned with each
+         invocation is limited. If the number of paths to be returned exceeds
+         this limit, a continuation token is returned in the x-ms-continuation
+         response header. When a continuation token is returned in the
+         response, it must be specified in a subsequent invocation of the list
+         operation to continue listing the paths.
+ :type continuation: str
+ :param path: Optional. Filters results to paths within the specified
+ directory. An error occurs if the directory does not exist.
+ :type path: str
+ :param max_results: An optional value that specifies the maximum
+ number of items to return. If omitted or greater than 5,000, the
+ response will include up to 5,000 items.
+ :type max_results: int
+ :param upn: Optional. Valid only when Hierarchical Namespace is
+ enabled for the account. If "true", the user identity values returned
+ in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+ transformed from Azure Active Directory Object IDs to User Principal
+ Names. If "false", the values will be returned as Azure Active
+ Directory Object IDs. The default value is false. Note that group and
+ application Object IDs are not translated because they do not have
+ unique friendly names.
+ :type upn: bool
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: PathList or the result of cls(response)
+ :rtype: ~azure.storage.filedatalake.models.PathList
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ # Construct URL
+ url = self.list_paths.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if continuation is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+ if path is not None:
+ query_parameters['directory'] = self._serialize.query("path", path, 'str')
+ query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool')
+ if max_results is not None:
+ query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
+ if upn is not None:
+ query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
+ query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/json'
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('PathList', response)
+ header_dict = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ list_paths.metadata = {'url': '/{filesystem}'}
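+
+    # Usage sketch (illustrative): listing paths and retrieving the
+    # x-ms-continuation header, which is surfaced through the `cls` callback's
+    # header dictionary; `fs_ops` is an assumed client instance.
+    #
+    #     def with_headers(response, deserialized, headers):
+    #         return deserialized, headers.get('x-ms-continuation')
+    #
+    #     paths, token = await fs_ops.list_paths(recursive=True, cls=with_headers)
+    #     # pass continuation=token on subsequent calls until it comes back empty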
+
+ async def list_blob_hierarchy_segment(self, delimiter=None, prefix=None, marker=None, max_results=None, include=None, showonly=None, timeout=None, request_id=None, *, cls=None, **kwargs):
+ """[Update] The List Blobs operation returns a list of the blobs under the
+ specified container.
+
+ :param delimiter: When the request includes this parameter, the
+ operation returns a BlobPrefix element in the response body that acts
+ as a placeholder for all blobs whose names begin with the same
+ substring up to the appearance of the delimiter character. The
+ delimiter may be a single character or a string.
+ :type delimiter: str
+        :param prefix: Filters results to paths (blobs) whose names begin with
+         the specified prefix.
+ :type prefix: str
+ :param marker: A string value that identifies the portion of the list
+ of containers to be returned with the next listing operation. The
+ operation returns the NextMarker value within the response body if the
+ listing operation did not return all containers remaining to be listed
+ with the current page. The NextMarker value can be used as the value
+ for the marker parameter in a subsequent call to request the next page
+ of list items. The marker value is opaque to the client.
+ :type marker: str
+ :param max_results: An optional value that specifies the maximum
+ number of items to return. If omitted or greater than 5,000, the
+ response will include up to 5,000 items.
+ :type max_results: int
+ :param include: Include this parameter to specify one or more datasets
+ to include in the response.
+ :type include: list[str or
+ ~azure.storage.filedatalake.models.ListBlobsIncludeItem]
+        :param showonly: Optional. Include this parameter to restrict the
+         listing to only the specified kind of path. Possible values include:
+         'deleted'
+ :type showonly: str or
+ ~azure.storage.filedatalake.models.ListBlobsShowOnly
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: ListBlobsHierarchySegmentResponse or the result of
+ cls(response)
+ :rtype:
+ ~azure.storage.filedatalake.models.ListBlobsHierarchySegmentResponse
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ # Construct URL
+ url = self.list_blob_hierarchy_segment.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if prefix is not None:
+ query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+ if delimiter is not None:
+ query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str')
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if max_results is not None:
+ query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
+ if include is not None:
+ query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',')
+ if showonly is not None:
+ query_parameters['showonly'] = self._serialize.query("showonly", showonly, 'ListBlobsShowOnly')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+ query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/json'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', response)
+ header_dict = {
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ list_blob_hierarchy_segment.metadata = {'url': '/{filesystem}'}
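+ # Illustrative usage sketch (editorial comment, not generated code): listing only
+ # soft-deleted blobs uses the showonly filter documented above. `fs_ops` is a
+ # hypothetical, already-initialized instance of this operations class.
+ #
+ #     segment = await fs_ops.list_blob_hierarchy_segment(
+ #         delimiter='/', showonly='deleted')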
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations_async/_path_operations_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations_async/_path_operations_async.py
new file mode 100644
index 00000000000..6a26574cd68
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations_async/_path_operations_async.py
@@ -0,0 +1,1765 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class PathOperations:
+ """PathOperations async operations.
+
+ You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer) -> None:
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+
+ async def create(self, resource=None, continuation=None, mode=None, rename_source=None, source_lease_id=None, properties=None, permissions=None, umask=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
+ """Create File | Create Directory | Rename File | Rename Directory.
+
+ Create or rename a file or directory. By default, the destination is
+ overwritten and if the destination already exists and has a lease the
+ lease is broken. This operation supports conditional HTTP requests.
+ For more information, see [Specifying Conditional Headers for Blob
+ Service
+ Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+ To fail if the destination already exists, use a conditional request
+ with If-None-Match: "*".
+
+ :param resource: Required only for Create File and Create Directory.
+ The value must be "file" or "directory". Possible values include:
+ 'directory', 'file'
+ :type resource: str or
+ ~azure.storage.filedatalake.models.PathResourceType
+ :param continuation: Optional. When renaming a directory, the number
+ of paths that are renamed with each invocation is limited. If the
+ number of paths to be renamed exceeds this limit, a continuation token
+ is returned in this response header. When a continuation token is
+ returned in the response, it must be specified in a subsequent
+ invocation of the rename operation to continue renaming the directory.
+ :type continuation: str
+ :param mode: Optional. Valid only when namespace is enabled. This
+ parameter determines the behavior of the rename operation. The value
+ must be "legacy" or "posix", and the default value will be "posix".
+ Possible values include: 'legacy', 'posix'
+ :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode
+ :param rename_source: An optional file or directory to be renamed.
+ The value must have the following format: "/{filesystem}/{path}". If
+ "x-ms-properties" is specified, the properties will overwrite the
+ existing properties; otherwise, the existing properties will be
+ preserved. This value must be a URL percent-encoded string. Note that
+ the string may only contain ASCII characters in the ISO-8859-1
+ character set.
+ :type rename_source: str
+ :param source_lease_id: A lease ID for the source path. If specified,
+ the source path must have an active lease and the lease ID must match.
+ :type source_lease_id: str
+ :param properties: Optional. User-defined properties to be stored with
+ the filesystem, in the format of a comma-separated list of name and
+ value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
+ string. Note that the string may only contain ASCII characters in the
+ ISO-8859-1 character set. If the filesystem exists, any properties
+ not included in the list will be removed. All properties are removed
+ if the header is omitted. To merge new and existing properties, first
+ get all existing properties and the current E-Tag, then make a
+ conditional request with the E-Tag and include values for all
+ properties.
+ :type properties: str
+ :param permissions: Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :type permissions: str
+ :param umask: Optional and only valid if Hierarchical Namespace is
+ enabled for the account. When creating a file or directory and the
+ parent folder does not have a default ACL, the umask restricts the
+ permissions of the file or directory to be created. The resulting
+ permission is given by p bitwise and not u, where p is the permission
+ and u is the umask. For example, if p is 0777 and u is 0057, then the
+ resulting permission is 0720. The default permission is 0777 for a
+ directory and 0666 for a file. The default umask is 0027. The umask
+ must be specified in 4-digit octal notation (e.g. 0766).
+ :type umask: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param path_http_headers: Additional parameters for the operation
+ :type path_http_headers:
+ ~azure.storage.filedatalake.models.PathHTTPHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :param source_modified_access_conditions: Additional parameters for
+ the operation
+ :type source_modified_access_conditions:
+ ~azure.storage.filedatalake.models.SourceModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ cache_control = None
+ if path_http_headers is not None:
+ cache_control = path_http_headers.cache_control
+ content_encoding = None
+ if path_http_headers is not None:
+ content_encoding = path_http_headers.content_encoding
+ content_language = None
+ if path_http_headers is not None:
+ content_language = path_http_headers.content_language
+ content_disposition = None
+ if path_http_headers is not None:
+ content_disposition = path_http_headers.content_disposition
+ content_type = None
+ if path_http_headers is not None:
+ content_type = path_http_headers.content_type
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ source_if_match = None
+ if source_modified_access_conditions is not None:
+ source_if_match = source_modified_access_conditions.source_if_match
+ source_if_none_match = None
+ if source_modified_access_conditions is not None:
+ source_if_none_match = source_modified_access_conditions.source_if_none_match
+ source_if_modified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ source_if_unmodified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+
+ # Construct URL
+ url = self.create.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if resource is not None:
+ query_parameters['resource'] = self._serialize.query("resource", resource, 'PathResourceType')
+ if continuation is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+ if mode is not None:
+ query_parameters['mode'] = self._serialize.query("mode", mode, 'PathRenameMode')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ if rename_source is not None:
+ header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str')
+ if source_lease_id is not None:
+ header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str')
+ if properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
+ if permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str')
+ if umask is not None:
+ header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if cache_control is not None:
+ header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
+ if content_encoding is not None:
+ header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
+ if content_language is not None:
+ header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
+ if content_disposition is not None:
+ header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
+ if content_type is not None:
+ header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
+ if source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
+ if source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
+ if source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ create.metadata = {'url': '/{filesystem}/{path}'}
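+ # Illustrative note (editorial comment, not generated code): the umask described in
+ # the docstring combines with the requested permission as p & ~u. Reproducing the
+ # documented example:
+ #
+ #     assert 0o777 & ~0o057 == 0o720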
+
+ async def update(self, action, mode, body, max_records=None, continuation=None, force_flag=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, properties=None, owner=None, group=None, permissions=None, acl=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Append Data | Flush Data | Set Properties | Set Access Control.
+
+ Uploads data to be appended to a file, flushes (writes) previously
+ uploaded data to a file, sets properties for a file or directory, or
+ sets access control for a file or directory. Data can only be appended
+ to a file. This operation supports conditional HTTP requests. For more
+ information, see [Specifying Conditional Headers for Blob Service
+ Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+
+ :param action: The action must be "append" to upload data to be
+ appended to a file, "flush" to flush previously uploaded data to a
+ file, "setProperties" to set the properties of a file or directory,
+ "setAccessControl" to set the owner, group, permissions, or access
+ control list for a file or directory, or "setAccessControlRecursive"
+ to set the access control list for a directory recursively. Note that
+ Hierarchical Namespace must be enabled for the account in order to use
+ access control. Also note that the Access Control List (ACL) includes
+ permissions for the owner, owning group, and others, so the
+ x-ms-permissions and x-ms-acl request headers are mutually exclusive.
+ Possible values include: 'append', 'flush', 'setProperties',
+ 'setAccessControl', 'setAccessControlRecursive'
+ :type action: str or
+ ~azure.storage.filedatalake.models.PathUpdateAction
+ :param mode: Mode "set" sets POSIX access control rights on files and
+ directories, "modify" modifies one or more POSIX access control rights
+ that pre-exist on files and directories, "remove" removes one or more
+ POSIX access control rights that were present earlier on files and
+ directories. Possible values include: 'set', 'modify', 'remove'
+ :type mode: str or
+ ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+ :param body: Initial data
+ :type body: Generator
+ :param max_records: Optional. Valid for "SetAccessControlRecursive"
+ operation. It specifies the maximum number of files or directories on
+ which the acl change will be applied. If omitted or greater than
+ 2,000, the request will process up to 2,000 items
+ :type max_records: int
+ :param continuation: Optional. The number of paths processed with each
+ invocation is limited. If the number of paths to be processed exceeds
+ this limit, a continuation token is returned in the response header
+ x-ms-continuation. When a continuation token is returned in the
+ response, it must be percent-encoded and specified in a subsequent
+ invocation of the setAccessControlRecursive operation.
+ :type continuation: str
+ :param force_flag: Optional. Valid for "SetAccessControlRecursive"
+ operation. If set to false, the operation will terminate quickly on
+ encountering user errors (4XX). If true, the operation will ignore
+ user errors and proceed with the operation on other sub-entities of
+ the directory. Continuation token will only be returned when forceFlag
+ is true in case of user errors. If not set, the default value is
+ false.
+ :type force_flag: bool
+ :param position: This parameter allows the caller to upload data in
+ parallel and control the order in which it is appended to the file.
+ It is required when uploading data to be appended to the file and when
+ flushing previously uploaded data to the file. The value must be the
+ position where the data is to be appended. Uploaded data is not
+ immediately flushed, or written, to the file. To flush, the
+ previously uploaded data must be contiguous, the position parameter
+ must be specified and equal to the length of the file after all data
+ has been written, and there must not be a request entity body included
+ with the request.
+ :type position: long
+ :param retain_uncommitted_data: Valid only for flush operations. If
+ "true", uncommitted data is retained after the flush operation
+ completes; otherwise, the uncommitted data is deleted after the flush
+ operation. The default is false. Data at offsets less than the
+ specified position are written to the file when flush succeeds, but
+ this optional parameter allows data after the flush position to be
+ retained for a future flush operation.
+ :type retain_uncommitted_data: bool
+ :param close: Azure Storage Events allow applications to receive
+ notifications when files change. When Azure Storage Events are
+ enabled, a file changed event is raised. This event has a property
+ indicating whether this is the final change to distinguish the
+ difference between an intermediate flush to a file stream and the
+ final close of a file stream. The close query parameter is valid only
+ when the action is "flush" and change notifications are enabled. If
+ the value of close is "true" and the flush operation completes
+ successfully, the service raises a file change notification with a
+ property indicating that this is the final update (the file stream has
+ been closed). If "false" a change notification is raised indicating
+ the file has changed. The default is false. This query parameter is
+ set to true by the Hadoop ABFS driver to indicate that the file stream
+ has been closed.
+ :type close: bool
+ :param content_length: Required for "Append Data" and "Flush Data".
+ Must be 0 for "Flush Data". Must be the length of the request content
+ in bytes for "Append Data".
+ :type content_length: long
+ :param properties: Optional. User-defined properties to be stored with
+ the filesystem, in the format of a comma-separated list of name and
+ value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded
+ string. Note that the string may only contain ASCII characters in the
+ ISO-8859-1 character set. If the filesystem exists, any properties
+ not included in the list will be removed. All properties are removed
+ if the header is omitted. To merge new and existing properties, first
+ get all existing properties and the current E-Tag, then make a
+ conditional request with the E-Tag and include values for all
+ properties.
+ :type properties: str
+ :param owner: Optional. The owner of the blob or directory.
+ :type owner: str
+ :param group: Optional. The owning group of the blob or directory.
+ :type group: str
+ :param permissions: Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :type permissions: str
+ :param acl: Sets POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, a user or
+ group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param path_http_headers: Additional parameters for the operation
+ :type path_http_headers:
+ ~azure.storage.filedatalake.models.PathHTTPHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: SetAccessControlRecursiveResponse or the result of
+ cls(response)
+ :rtype:
+ ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ content_md5 = None
+ if path_http_headers is not None:
+ content_md5 = path_http_headers.content_md5
+ cache_control = None
+ if path_http_headers is not None:
+ cache_control = path_http_headers.cache_control
+ content_type = None
+ if path_http_headers is not None:
+ content_type = path_http_headers.content_type
+ content_disposition = None
+ if path_http_headers is not None:
+ content_disposition = path_http_headers.content_disposition
+ content_encoding = None
+ if path_http_headers is not None:
+ content_encoding = path_http_headers.content_encoding
+ content_language = None
+ if path_http_headers is not None:
+ content_language = path_http_headers.content_language
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ # Construct URL
+ url = self.update.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['action'] = self._serialize.query("action", action, 'PathUpdateAction')
+ if max_records is not None:
+ query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1)
+ if continuation is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+ query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode')
+ if force_flag is not None:
+ query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool')
+ if position is not None:
+ query_parameters['position'] = self._serialize.query("position", position, 'long')
+ if retain_uncommitted_data is not None:
+ query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool')
+ if close is not None:
+ query_parameters['close'] = self._serialize.query("close", close, 'bool')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/json'
+ header_parameters['Content-Type'] = 'application/octet-stream'
+ if content_length is not None:
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0)
+ if properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
+ if owner is not None:
+ header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
+ if group is not None:
+ header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
+ if permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str')
+ if acl is not None:
+ header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if content_md5 is not None:
+ header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray')
+ if cache_control is not None:
+ header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
+ if content_type is not None:
+ header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
+ if content_disposition is not None:
+ header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
+ if content_encoding is not None:
+ header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
+ if content_language is not None:
+ header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct body
+
+ # Construct and send request
+ request = self._client.patch(url, query_parameters, header_parameters, stream_content=body)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('SetAccessControlRecursiveResponse', response)
+ header_dict = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
+ 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
+ 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
+ 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
+ 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')),
+ 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
+ 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ update.metadata = {'url': '/{filesystem}/{path}'}
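+ # Illustrative note (editorial comment, not generated code): per the docstring
+ # above, an upload is a series of "append" calls followed by one "flush", where
+ # each append advances `position` by the chunk length and the final flush passes
+ # the total length with content_length=0. In outline (names are hypothetical):
+ #
+ #     offset = 0
+ #     for chunk in chunks:          # chunks: iterable of bytes
+ #         # action='append', position=offset, content_length=len(chunk), body=chunk
+ #         offset += len(chunk)
+ #     # action='flush', position=offset, content_length=0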
+
+ async def lease(self, x_ms_lease_action, x_ms_lease_duration=None, x_ms_lease_break_period=None, proposed_lease_id=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Lease Path.
+
+ Create and manage a lease to restrict write and delete access to the
+ path. This operation supports conditional HTTP requests. For more
+ information, see [Specifying Conditional Headers for Blob Service
+ Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+
+ :param x_ms_lease_action: There are five lease actions: "acquire",
+ "break", "change", "renew", and "release". Use "acquire" and specify
+ the "x-ms-proposed-lease-id" and "x-ms-lease-duration" to acquire a
+ new lease. Use "break" to break an existing lease. When a lease is
+ broken, the lease break period is allowed to elapse, during which time
+ no lease operation except break and release can be performed on the
+ file. When a lease is successfully broken, the response indicates the
+ interval in seconds until a new lease can be acquired. Use "change"
+ and specify the current lease ID in "x-ms-lease-id" and the new lease
+ ID in "x-ms-proposed-lease-id" to change the lease ID of an active
+ lease. Use "renew" and specify the "x-ms-lease-id" to renew an
+ existing lease. Use "release" and specify the "x-ms-lease-id" to
+ release a lease. Possible values include: 'acquire', 'break',
+ 'change', 'renew', 'release'
+ :type x_ms_lease_action: str or
+ ~azure.storage.filedatalake.models.PathLeaseAction
+ :param x_ms_lease_duration: The lease duration is required to acquire
+ a lease, and specifies the duration of the lease in seconds. The
+ lease duration must be between 15 and 60 seconds or -1 for infinite
+ lease.
+ :type x_ms_lease_duration: int
+ :param x_ms_lease_break_period: The lease break period duration is
+ optional to break a lease, and specifies the break period of the
+ lease in seconds. The lease break duration must be between 0 and 60
+ seconds.
+ :type x_ms_lease_break_period: int
+ :param proposed_lease_id: Proposed lease ID, in a GUID string format.
+ The Blob service returns 400 (Invalid request) if the proposed lease
+ ID is not in the correct format. See Guid Constructor (String) for a
+ list of valid GUID string formats.
+ :type proposed_lease_id: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ # Construct URL
+ url = self.lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'PathLeaseAction')
+ if x_ms_lease_duration is not None:
+ header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", x_ms_lease_duration, 'int')
+ if x_ms_lease_break_period is not None:
+ header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int')
+ if proposed_lease_id is not None:
+ header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.post(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
+ 'x-ms-lease-time': self._deserialize('str', response.headers.get('x-ms-lease-time')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ lease.metadata = {'url': '/{filesystem}/{path}'}
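+ # Illustrative usage sketch (editorial comment, not generated code): acquiring a
+ # lease per the docstring above; `path_ops` is a hypothetical, already-initialized
+ # instance and the lease ID is a placeholder GUID.
+ #
+ #     await path_ops.lease(
+ #         'acquire', x_ms_lease_duration=15,
+ #         proposed_lease_id='00000000-0000-0000-0000-000000000000')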
+
+ async def read(self, range=None, x_ms_range_get_content_md5=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Read File.
+
+ Read the contents of a file. For read operations, range requests are
+ supported. This operation supports conditional HTTP requests. For more
+ information, see [Specifying Conditional Headers for Blob Service
+ Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+
+ :param range: The HTTP Range request header specifies one or more byte
+ ranges of the resource to be retrieved.
+ :type range: str
+ :param x_ms_range_get_content_md5: Optional. When this header is set
+ to "true" and specified together with the Range header, the service
+ returns the MD5 hash for the range, as long as the range is less than
+ or equal to 4MB in size. If this header is specified without the Range
+ header, the service returns status code 400 (Bad Request). If this
+ header is set to true when the range exceeds 4 MB in size, the service
+ returns status code 400 (Bad Request).
+ :type x_ms_range_get_content_md5: bool
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: object or the result of cls(response)
+ :rtype: Generator
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ # Construct URL
+ url = self.read.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/json'
+ if range is not None:
+ header_parameters['Range'] = self._serialize.header("range", range, 'str')
+ if x_ms_range_get_content_md5 is not None:
+ header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 206]:
+ await response.load_body()
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = response.stream_download(self._client._pipeline)
+ header_dict = {
+ 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
+ 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
+ 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
+ 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
+ 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')),
+ 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
+ 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')),
+ 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')),
+ 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')),
+ 'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ if response.status_code == 206:
+ deserialized = response.stream_download(self._client._pipeline)
+ header_dict = {
+ 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
+ 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
+ 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
+ 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
+ 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')),
+ 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
+ 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')),
+ 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')),
+ 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')),
+ 'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ read.metadata = {'url': '/{filesystem}/{path}'}
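+ # Illustrative usage sketch (editorial comment, not generated code): a ranged read
+ # that also requests the MD5 of the range (range must be at most 4 MB), per the
+ # docstring above; `path_ops` is a hypothetical, already-initialized instance.
+ #
+ #     downloader = await path_ops.read(
+ #         range='bytes=0-1023', x_ms_range_get_content_md5=True)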
+
+ async def get_properties(self, action=None, upn=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Get Properties | Get Status | Get Access Control List.
+
+ Get Properties returns all system and user defined properties for a
+ path. Get Status returns all system defined properties for a path. Get
+ Access Control List returns the access control list for a path. This
+ operation supports conditional HTTP requests. For more information,
+ see [Specifying Conditional Headers for Blob Service
+ Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+
+ :param action: Optional. If the value is "getStatus" only the system
+ defined properties for the path are returned. If the value is
+ "getAccessControl" the access control list is returned in the response
+ headers (Hierarchical Namespace must be enabled for the account),
+ otherwise the properties are returned. Possible values include:
+ 'getAccessControl', 'getStatus'
+ :type action: str or
+ ~azure.storage.filedatalake.models.PathGetPropertiesAction
+ :param upn: Optional. Valid only when Hierarchical Namespace is
+ enabled for the account. If "true", the user identity values returned
+ in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+ transformed from Azure Active Directory Object IDs to User Principal
+ Names. If "false", the values will be returned as Azure Active
+ Directory Object IDs. The default value is false. Note that group and
+ application Object IDs are not translated because they do not have
+ unique friendly names.
+ :type upn: bool
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ # Construct URL
+ url = self.get_properties.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if action is not None:
+ query_parameters['action'] = self._serialize.query("action", action, 'PathGetPropertiesAction')
+ if upn is not None:
+ query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.head(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
+ 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
+ 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
+ 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
+ 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')),
+ 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')),
+ 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')),
+ 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')),
+ 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')),
+ 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')),
+ 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')),
+ 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')),
+ 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ get_properties.metadata = {'url': '/{filesystem}/{path}'}
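+ # Illustrative usage sketch (editorial comment, not generated code): retrieving the
+ # access control list with friendly names (requires a Hierarchical Namespace
+ # account), per the docstring above; `path_ops` is a hypothetical instance.
+ #
+ #     await path_ops.get_properties(action='getAccessControl', upn=True)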
+
+ async def delete(self, recursive=None, continuation=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Delete File | Delete Directory.
+
+ Delete the file or directory. This operation supports conditional HTTP
+ requests. For more information, see [Specifying Conditional Headers
+ for Blob Service
+ Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+
+ :param recursive: Required
+ :type recursive: bool
+ :param continuation: Optional. When deleting a directory, the number
+ of paths that are deleted with each invocation is limited. If the
+ number of paths to be deleted exceeds this limit, a continuation token
+ is returned in this response header. When a continuation token is
+ returned in the response, it must be specified in a subsequent
+ invocation of the delete operation to continue deleting the directory.
+ :type continuation: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ # Construct URL
+ url = self.delete.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if recursive is not None:
+ query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool')
+ if continuation is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.delete(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
+ 'x-ms-deletion-id': self._deserialize('str', response.headers.get('x-ms-deletion-id')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ delete.metadata = {'url': '/{filesystem}/{path}'}
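+
+ # Illustrative usage sketch (not generated code): assuming "path_ops" is an
+ # already constructed instance of this operations class and that
+ # ModifiedAccessConditions has been imported from the vendored models, a
+ # conditional recursive delete of a directory might look like:
+ #
+ #     await path_ops.delete(
+ #         recursive=True,
+ #         modified_access_conditions=ModifiedAccessConditions(if_match='"0x8D9EXAMPLEETAG"'))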
+
+ async def set_access_control(self, timeout=None, owner=None, group=None, permissions=None, acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Set the owner, group, permissions, or access control list for a path.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param owner: Optional. The owner of the blob or directory.
+ :type owner: str
+ :param group: Optional. The owning group of the blob or directory.
+ :type group: str
+ :param permissions: Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :type permissions: str
+ :param acl: Sets POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, a user or
+ group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ action = "setAccessControl"
+
+ # Construct URL
+ url = self.set_access_control.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if owner is not None:
+ header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
+ if group is not None:
+ header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
+ if permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str')
+ if acl is not None:
+ header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.patch(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ }
+ return cls(response, None, response_headers)
+ set_access_control.metadata = {'url': '/{filesystem}/{path}'}
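+
+ # Illustrative usage sketch (not generated code): with an already constructed
+ # instance "path_ops" of this operations class (name assumed), the ACE format
+ # described above could be exercised like this, granting rwx to the owning
+ # user, r-x to the owning group and nothing to others:
+ #
+ #     await path_ops.set_access_control(acl="user::rwx,group::r-x,other::---")
+ #
+ # POSIX-style permissions may be set instead of a full ACL:
+ #
+ #     await path_ops.set_access_control(permissions="0750")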
+
+ async def set_access_control_recursive(self, mode, timeout=None, continuation=None, force_flag=None, max_records=None, acl=None, request_id=None, *, cls=None, **kwargs):
+ """Set the access control list for a path and subpaths.
+
+ :param mode: Mode "set" sets POSIX access control rights on files and
+ directories, "modify" modifies one or more POSIX access control rights
+ that pre-exist on files and directories, "remove" removes one or more
+ POSIX access control rights that were present earlier on files and
+ directories. Possible values include: 'set', 'modify', 'remove'
+ :type mode: str or
+ ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param continuation: Optional. When changing access control recursively
+ on a directory, the number of paths that are processed with each
+ invocation is limited. If the number of paths to be processed exceeds
+ this limit, a continuation token is returned in this response header.
+ When a continuation token is returned in the response, it must be
+ specified in a subsequent invocation of the setAccessControlRecursive
+ operation to continue the operation on the directory.
+ :type continuation: str
+ :param force_flag: Optional. Valid for "SetAccessControlRecursive"
+ operation. If set to false, the operation will terminate quickly on
+ encountering user errors (4XX). If true, the operation will ignore
+ user errors and proceed with the operation on other sub-entities of
+ the directory. Continuation token will only be returned when forceFlag
+ is true in case of user errors. If not set, the default value is
+ false.
+ :type force_flag: bool
+ :param max_records: Optional. It specifies the maximum number of files
+ or directories on which the acl change will be applied. If omitted or
+ greater than 2,000, the request will process up to 2,000 items
+ :type max_records: int
+ :param acl: Sets POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, a user or
+ group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: SetAccessControlRecursiveResponse or the result of
+ cls(response)
+ :rtype:
+ ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ action = "setAccessControlRecursive"
+
+ # Construct URL
+ url = self.set_access_control_recursive.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if continuation is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+ query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode')
+ if force_flag is not None:
+ query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool')
+ if max_records is not None:
+ query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1)
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/json'
+ if acl is not None:
+ header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+ # Construct and send request
+ request = self._client.patch(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('SetAccessControlRecursiveResponse', response)
+ header_dict = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'}
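+
+ # Illustrative usage sketch (not generated code): "path_ops" is an assumed,
+ # already constructed instance of this operations class and <object-id> is a
+ # placeholder AAD object id. The x-ms-continuation response header (surfaced
+ # through a custom "cls" callback) would be fed back via "continuation=" to
+ # resume a partially completed change:
+ #
+ #     result = await path_ops.set_access_control_recursive(
+ #         mode="modify",
+ #         acl="user:<object-id>:r-x",
+ #         max_records=2000)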
+
+ async def flush_data(self, timeout=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Set the owner, group, permissions, or access control list for a path.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param position: This parameter allows the caller to upload data in
+ parallel and control the order in which it is appended to the file.
+ It is required when uploading data to be appended to the file and when
+ flushing previously uploaded data to the file. The value must be the
+ position where the data is to be appended. Uploaded data is not
+ immediately flushed, or written, to the file. To flush, the
+ previously uploaded data must be contiguous, the position parameter
+ must be specified and equal to the length of the file after all data
+ has been written, and there must not be a request entity body included
+ with the request.
+ :type position: long
+ :param retain_uncommitted_data: Valid only for flush operations. If
+ "true", uncommitted data is retained after the flush operation
+ completes; otherwise, the uncommitted data is deleted after the flush
+ operation. The default is false. Data at offsets less than the
+ specified position are written to the file when flush succeeds, but
+ this optional parameter allows data after the flush position to be
+ retained for a future flush operation.
+ :type retain_uncommitted_data: bool
+ :param close: Azure Storage Events allow applications to receive
+ notifications when files change. When Azure Storage Events are
+ enabled, a file changed event is raised. This event has a property
+ indicating whether this is the final change to distinguish the
+ difference between an intermediate flush to a file stream and the
+ final close of a file stream. The close query parameter is valid only
+ when the action is "flush" and change notifications are enabled. If
+ the value of close is "true" and the flush operation completes
+ successfully, the service raises a file change notification with a
+ property indicating that this is the final update (the file stream has
+ been closed). If "false" a change notification is raised indicating
+ the file has changed. The default is false. This query parameter is
+ set to true by the Hadoop ABFS driver to indicate that the file stream
+ has been closed."
+ :type close: bool
+ :param content_length: Required for "Append Data" and "Flush Data".
+ Must be 0 for "Flush Data". Must be the length of the request content
+ in bytes for "Append Data".
+ :type content_length: long
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param path_http_headers: Additional parameters for the operation
+ :type path_http_headers:
+ ~azure.storage.filedatalake.models.PathHTTPHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ content_md5 = None
+ if path_http_headers is not None:
+ content_md5 = path_http_headers.content_md5
+ cache_control = None
+ if path_http_headers is not None:
+ cache_control = path_http_headers.cache_control
+ content_type = None
+ if path_http_headers is not None:
+ content_type = path_http_headers.content_type
+ content_disposition = None
+ if path_http_headers is not None:
+ content_disposition = path_http_headers.content_disposition
+ content_encoding = None
+ if path_http_headers is not None:
+ content_encoding = path_http_headers.content_encoding
+ content_language = None
+ if path_http_headers is not None:
+ content_language = path_http_headers.content_language
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ action = "flush"
+
+ # Construct URL
+ url = self.flush_data.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if position is not None:
+ query_parameters['position'] = self._serialize.query("position", position, 'long')
+ if retain_uncommitted_data is not None:
+ query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool')
+ if close is not None:
+ query_parameters['close'] = self._serialize.query("close", close, 'bool')
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if content_length is not None:
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0)
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if content_md5 is not None:
+ header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray')
+ if cache_control is not None:
+ header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
+ if content_type is not None:
+ header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
+ if content_disposition is not None:
+ header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
+ if content_encoding is not None:
+ header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
+ if content_language is not None:
+ header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.patch(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ }
+ return cls(response, None, response_headers)
+ flush_data.metadata = {'url': '/{filesystem}/{path}'}
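+
+ # Illustrative usage sketch (not generated code): assuming "path_ops" is an
+ # already constructed instance of this operations class and that 5 bytes have
+ # previously been appended starting at position 0, a final flush that also
+ # raises the "file closed" change notification might look like:
+ #
+ #     await path_ops.flush_data(position=5, content_length=0, close=True)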
+
+ async def append_data(self, body, position=None, timeout=None, content_length=None, transactional_content_crc64=None, request_id=None, path_http_headers=None, lease_access_conditions=None, *, cls=None, **kwargs):
+ """Append data to the file.
+
+ :param body: Initial data
+ :type body: Generator
+ :param position: This parameter allows the caller to upload data in
+ parallel and control the order in which it is appended to the file.
+ It is required when uploading data to be appended to the file and when
+ flushing previously uploaded data to the file. The value must be the
+ position where the data is to be appended. Uploaded data is not
+ immediately flushed, or written, to the file. To flush, the
+ previously uploaded data must be contiguous, the position parameter
+ must be specified and equal to the length of the file after all data
+ has been written, and there must not be a request entity body included
+ with the request.
+ :type position: long
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param content_length: Required for "Append Data" and "Flush Data".
+ Must be 0 for "Flush Data". Must be the length of the request content
+ in bytes for "Append Data".
+ :type content_length: long
+ :param transactional_content_crc64: Specify the transactional crc64
+ for the body, to be validated by the service.
+ :type transactional_content_crc64: bytearray
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param path_http_headers: Additional parameters for the operation
+ :type path_http_headers:
+ ~azure.storage.filedatalake.models.PathHTTPHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ transactional_content_hash = None
+ if path_http_headers is not None:
+ transactional_content_hash = path_http_headers.transactional_content_hash
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+
+ action = "append"
+
+ # Construct URL
+ url = self.append_data.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if position is not None:
+ query_parameters['position'] = self._serialize.query("position", position, 'long')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+ if content_length is not None:
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0)
+ if transactional_content_crc64 is not None:
+ header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if transactional_content_hash is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", transactional_content_hash, 'bytearray')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+
+ # Construct body
+
+ # Construct and send request
+ request = self._client.patch(url, query_parameters, header_parameters, stream_content=body)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ }
+ return cls(response, None, response_headers)
+ append_data.metadata = {'url': '/{filesystem}/{path}'}
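+
+ # Illustrative usage sketch (not generated code): "path_ops" is an assumed,
+ # already constructed instance of this operations class. Appended data only
+ # becomes visible once it is flushed at a position equal to the total length
+ # written so far:
+ #
+ #     data = b"hello"
+ #     await path_ops.append_data(body=iter([data]), position=0,
+ #                                content_length=len(data))
+ #     await path_ops.flush_data(position=len(data), content_length=0)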
+
+ async def set_expiry(self, expiry_options, timeout=None, request_id=None, expires_on=None, *, cls=None, **kwargs):
+ """Sets the time a blob will expire and be deleted.
+
+ :param expiry_options: Required. Indicates mode of the expiry time.
+ Possible values include: 'NeverExpire', 'RelativeToCreation',
+ 'RelativeToNow', 'Absolute'
+ :type expiry_options: str or
+ ~azure.storage.filedatalake.models.PathExpiryOptions
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param expires_on: The time to set the blob to expire.
+ :type expires_on: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "expiry"
+
+ # Construct URL
+ url = self.set_expiry.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str')
+ if expires_on is not None:
+ header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_expiry.metadata = {'url': '/{filesystem}/{path}'}
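+
+ # Illustrative usage sketch (not generated code): "path_ops" is an assumed,
+ # already constructed instance of this operations class; for the relative
+ # expiry modes the service interprets expires_on as a number of milliseconds:
+ #
+ #     await path_ops.set_expiry("RelativeToNow", expires_on="30000")
+ #     await path_ops.set_expiry("NeverExpire")   # clear any previously set expiry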
+
+ async def undelete(self, timeout=None, undelete_source=None, request_id=None, *, cls=None, **kwargs):
+ """Undelete a path that was previously soft deleted.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param undelete_source: Only for hierarchical namespace enabled
+ accounts. Optional. The path of the soft deleted blob to undelete.
+ :type undelete_source: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "undelete"
+
+ # Construct URL
+ url = self.undelete.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if undelete_source is not None:
+ header_parameters['x-ms-undelete-source'] = self._serialize.header("undelete_source", undelete_source, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ undelete.metadata = {'url': '/{filesystem}/{path}'}
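+
+ # Illustrative usage sketch (not generated code): "path_ops" is an assumed,
+ # already constructed instance of this operations class, and the exact
+ # undelete_source format shown (deleted path name plus deletion id) is an
+ # assumption rather than a documented contract:
+ #
+ #     await path_ops.undelete(
+ #         undelete_source="<deleted-path-name>?deletionid=<deletion-id>")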
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations_async/_service_operations_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations_async/_service_operations_async.py
new file mode 100644
index 00000000000..b4cb9c5a7ee
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/aio/operations_async/_service_operations_async.py
@@ -0,0 +1,128 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class ServiceOperations:
+ """ServiceOperations async operations.
+
+ You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ :ivar resource: The value must be "account" for all account operations. Constant value: "account".
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer) -> None:
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+ self.resource = "account"
+
+ async def list_file_systems(self, prefix=None, continuation=None, max_results=None, request_id=None, timeout=None, *, cls=None, **kwargs):
+ """List FileSystems.
+
+ List filesystems and their properties in given account.
+
+ :param prefix: Filters results to filesystems within the specified
+ prefix.
+ :type prefix: str
+ :param continuation: Optional. When the number of filesystems to be
+ listed exceeds the maximum allowed, a continuation token is returned
+ in this response header. When a continuation token is returned in the
+ response, it must be specified in a subsequent invocation of the list
+ operation to continue listing the filesystems.
+ :type continuation: str
+ :param max_results: An optional value that specifies the maximum
+ number of items to return. If omitted or greater than 5,000, the
+ response will include up to 5,000 items.
+ :type max_results: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: FileSystemList or the result of cls(response)
+ :rtype: ~azure.storage.filedatalake.models.FileSystemList
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ # Construct URL
+ url = self.list_file_systems.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str')
+ if prefix is not None:
+ query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+ if continuation is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+ if max_results is not None:
+ query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/json'
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('FileSystemList', response)
+ header_dict = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ list_file_systems.metadata = {'url': '/'}
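+
+ # Illustrative usage sketch (not generated code): "service_ops" is an assumed,
+ # already constructed instance of this operations class, and the attribute
+ # names on the deserialized FileSystemList/FileSystem models are assumptions:
+ #
+ #     fs_list = await service_ops.list_file_systems(prefix="myfs", max_results=100)
+ #     for fs in fs_list.filesystems:
+ #         print(fs.name, fs.last_modified)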
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/models/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/models/__init__.py
new file mode 100644
index 00000000000..fc4548f39b1
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/models/__init__.py
@@ -0,0 +1,83 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+try:
+ from ._models_py3 import AclFailedEntry
+ from ._models_py3 import BlobHierarchyListSegment
+ from ._models_py3 import BlobItemInternal
+ from ._models_py3 import BlobPrefix
+ from ._models_py3 import BlobPropertiesInternal
+ from ._models_py3 import FileSystem
+ from ._models_py3 import FileSystemList
+ from ._models_py3 import LeaseAccessConditions
+ from ._models_py3 import ListBlobsHierarchySegmentResponse
+ from ._models_py3 import ModifiedAccessConditions
+ from ._models_py3 import Path
+ from ._models_py3 import PathHTTPHeaders
+ from ._models_py3 import PathList
+ from ._models_py3 import SetAccessControlRecursiveResponse
+ from ._models_py3 import SourceModifiedAccessConditions
+ from ._models_py3 import StorageError
+ from ._models_py3 import StorageErrorError
+except (SyntaxError, ImportError):
+ from ._models import AclFailedEntry # type: ignore
+ from ._models import BlobHierarchyListSegment # type: ignore
+ from ._models import BlobItemInternal # type: ignore
+ from ._models import BlobPrefix # type: ignore
+ from ._models import BlobPropertiesInternal # type: ignore
+ from ._models import FileSystem # type: ignore
+ from ._models import FileSystemList # type: ignore
+ from ._models import LeaseAccessConditions # type: ignore
+ from ._models import ListBlobsHierarchySegmentResponse # type: ignore
+ from ._models import ModifiedAccessConditions # type: ignore
+ from ._models import Path # type: ignore
+ from ._models import PathHTTPHeaders # type: ignore
+ from ._models import PathList # type: ignore
+ from ._models import SetAccessControlRecursiveResponse # type: ignore
+ from ._models import SourceModifiedAccessConditions # type: ignore
+ from ._models import StorageError # type: ignore
+ from ._models import StorageErrorError # type: ignore
+
+from ._azure_data_lake_storage_restapi_enums import (
+ ListBlobsIncludeItem,
+ PathExpiryOptions,
+ PathGetPropertiesAction,
+ PathLeaseAction,
+ PathRenameMode,
+ PathResourceType,
+ PathSetAccessControlRecursiveMode,
+ PathUpdateAction,
+)
+
+__all__ = [
+ 'AclFailedEntry',
+ 'BlobHierarchyListSegment',
+ 'BlobItemInternal',
+ 'BlobPrefix',
+ 'BlobPropertiesInternal',
+ 'FileSystem',
+ 'FileSystemList',
+ 'LeaseAccessConditions',
+ 'ListBlobsHierarchySegmentResponse',
+ 'ModifiedAccessConditions',
+ 'Path',
+ 'PathHTTPHeaders',
+ 'PathList',
+ 'SetAccessControlRecursiveResponse',
+ 'SourceModifiedAccessConditions',
+ 'StorageError',
+ 'StorageErrorError',
+ 'ListBlobsIncludeItem',
+ 'PathExpiryOptions',
+ 'PathGetPropertiesAction',
+ 'PathLeaseAction',
+ 'PathRenameMode',
+ 'PathResourceType',
+ 'PathSetAccessControlRecursiveMode',
+ 'PathUpdateAction',
+]
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/models/_azure_data_lake_storage_restapi_enums.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/models/_azure_data_lake_storage_restapi_enums.py
new file mode 100644
index 00000000000..804050e9e0d
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/models/_azure_data_lake_storage_restapi_enums.py
@@ -0,0 +1,81 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from enum import Enum, EnumMeta
+from six import with_metaclass
+
+class _CaseInsensitiveEnumMeta(EnumMeta):
+ def __getitem__(self, name):
+ return super().__getitem__(name.upper())
+
+ def __getattr__(cls, name):
+ """Return the enum member matching `name`
+ We use __getattr__ instead of descriptors or inserting into the enum
+ class' __dict__ in order to support `name` and `value` being both
+ properties for enum members (which live in the class' __dict__) and
+ enum members themselves.
+ """
+ try:
+ return cls._member_map_[name.upper()]
+ except KeyError:
+ raise AttributeError(name)
+
+
+class ListBlobsIncludeItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+
+ COPY = "copy"
+ DELETED = "deleted"
+ METADATA = "metadata"
+ SNAPSHOTS = "snapshots"
+ UNCOMMITTEDBLOBS = "uncommittedblobs"
+ VERSIONS = "versions"
+ TAGS = "tags"
+
+class PathExpiryOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+
+ NEVER_EXPIRE = "NeverExpire"
+ RELATIVE_TO_CREATION = "RelativeToCreation"
+ RELATIVE_TO_NOW = "RelativeToNow"
+ ABSOLUTE = "Absolute"
+
+class PathGetPropertiesAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+
+ GET_ACCESS_CONTROL = "getAccessControl"
+ GET_STATUS = "getStatus"
+
+class PathLeaseAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+
+ ACQUIRE = "acquire"
+ BREAK_ENUM = "break"
+ CHANGE = "change"
+ RENEW = "renew"
+ RELEASE = "release"
+
+class PathRenameMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+
+ LEGACY = "legacy"
+ POSIX = "posix"
+
+class PathResourceType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+
+ DIRECTORY = "directory"
+ FILE = "file"
+
+class PathSetAccessControlRecursiveMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+
+ SET = "set"
+ MODIFY = "modify"
+ REMOVE = "remove"
+
+class PathUpdateAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+
+ APPEND = "append"
+ FLUSH = "flush"
+ SET_PROPERTIES = "setProperties"
+ SET_ACCESS_CONTROL = "setAccessControl"
+ SET_ACCESS_CONTROL_RECURSIVE = "setAccessControlRecursive"
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/models/_data_lake_storage_client_enums.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/models/_data_lake_storage_client_enums.py
new file mode 100644
index 00000000000..5a1f13240c6
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/models/_data_lake_storage_client_enums.py
@@ -0,0 +1,79 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from enum import Enum
+
+
+class PathSetAccessControlRecursiveMode(str, Enum):
+
+ set = "set"
+ modify = "modify"
+ remove = "remove"
+
+
+class PathExpiryOptions(str, Enum):
+
+ never_expire = "NeverExpire"
+ relative_to_creation = "RelativeToCreation"
+ relative_to_now = "RelativeToNow"
+ absolute = "Absolute"
+
+
+class ListBlobsIncludeItem(str, Enum):
+
+ copy = "copy"
+ deleted = "deleted"
+ metadata = "metadata"
+ snapshots = "snapshots"
+ uncommittedblobs = "uncommittedblobs"
+ versions = "versions"
+ tags = "tags"
+
+
+class ListBlobsShowOnly(str, Enum):
+
+ deleted = "deleted"
+
+
+class PathResourceType(str, Enum):
+
+ directory = "directory"
+ file = "file"
+
+
+class PathRenameMode(str, Enum):
+
+ legacy = "legacy"
+ posix = "posix"
+
+
+class PathUpdateAction(str, Enum):
+
+ append = "append"
+ flush = "flush"
+ set_properties = "setProperties"
+ set_access_control = "setAccessControl"
+ set_access_control_recursive = "setAccessControlRecursive"
+
+
+class PathLeaseAction(str, Enum):
+
+ acquire = "acquire"
+ break_enum = "break"
+ change = "change"
+ renew = "renew"
+ release = "release"
+
+
+class PathGetPropertiesAction(str, Enum):
+
+ get_access_control = "getAccessControl"
+ get_status = "getStatus"
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/models/_models.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/models/_models.py
new file mode 100644
index 00000000000..237617a48a1
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/models/_models.py
@@ -0,0 +1,672 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import HttpResponseError
+import msrest.serialization
+
+
+class AclFailedEntry(msrest.serialization.Model):
+ """AclFailedEntry.
+
+ :param name:
+ :type name: str
+ :param type:
+ :type type: str
+ :param error_message:
+ :type error_message: str
+ """
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'type': {'key': 'type', 'type': 'str'},
+ 'error_message': {'key': 'errorMessage', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(AclFailedEntry, self).__init__(**kwargs)
+ self.name = kwargs.get('name', None)
+ self.type = kwargs.get('type', None)
+ self.error_message = kwargs.get('error_message', None)
+
+
+class BlobHierarchyListSegment(msrest.serialization.Model):
+ """BlobHierarchyListSegment.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param blob_prefixes:
+ :type blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix]
+ :param blob_items: Required.
+ :type blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal]
+ """
+
+ _validation = {
+ 'blob_items': {'required': True},
+ }
+
+ _attribute_map = {
+ 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]'},
+ 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'},
+ }
+ _xml_map = {
+ 'name': 'Blobs'
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(BlobHierarchyListSegment, self).__init__(**kwargs)
+ self.blob_prefixes = kwargs.get('blob_prefixes', None)
+ self.blob_items = kwargs['blob_items']
+
+
+class BlobItemInternal(msrest.serialization.Model):
+ """An Azure Storage blob.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required.
+ :type name: str
+ :param deleted: Required.
+ :type deleted: bool
+ :param snapshot: Required.
+ :type snapshot: str
+ :param version_id:
+ :type version_id: str
+ :param is_current_version:
+ :type is_current_version: bool
+ :param properties: Required. Properties of a blob.
+ :type properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal
+ :param deletion_id:
+ :type deletion_id: str
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ 'deleted': {'required': True},
+ 'snapshot': {'required': True},
+ 'properties': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str'},
+ 'deleted': {'key': 'Deleted', 'type': 'bool'},
+ 'snapshot': {'key': 'Snapshot', 'type': 'str'},
+ 'version_id': {'key': 'VersionId', 'type': 'str'},
+ 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'},
+ 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'},
+ 'deletion_id': {'key': 'DeletionId', 'type': 'str'},
+ }
+ _xml_map = {
+ 'name': 'Blob'
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(BlobItemInternal, self).__init__(**kwargs)
+ self.name = kwargs['name']
+ self.deleted = kwargs['deleted']
+ self.snapshot = kwargs['snapshot']
+ self.version_id = kwargs.get('version_id', None)
+ self.is_current_version = kwargs.get('is_current_version', None)
+ self.properties = kwargs['properties']
+ self.deletion_id = kwargs.get('deletion_id', None)
+
+
+class BlobPrefix(msrest.serialization.Model):
+ """BlobPrefix.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required.
+ :type name: str
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(BlobPrefix, self).__init__(**kwargs)
+ self.name = kwargs['name']
+
+
+class BlobPropertiesInternal(msrest.serialization.Model):
+ """Properties of a blob.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param creation_time:
+ :type creation_time: ~datetime.datetime
+ :param last_modified: Required.
+ :type last_modified: ~datetime.datetime
+ :param etag: Required.
+ :type etag: str
+ :param content_length: Size in bytes.
+ :type content_length: long
+ :param content_type:
+ :type content_type: str
+ :param content_encoding:
+ :type content_encoding: str
+ :param content_language:
+ :type content_language: str
+ :param content_md5:
+ :type content_md5: bytearray
+ :param content_disposition:
+ :type content_disposition: str
+ :param cache_control:
+ :type cache_control: str
+ :param blob_sequence_number:
+ :type blob_sequence_number: long
+ :param copy_id:
+ :type copy_id: str
+ :param copy_source:
+ :type copy_source: str
+ :param copy_progress:
+ :type copy_progress: str
+ :param copy_completion_time:
+ :type copy_completion_time: ~datetime.datetime
+ :param copy_status_description:
+ :type copy_status_description: str
+ :param server_encrypted:
+ :type server_encrypted: bool
+ :param incremental_copy:
+ :type incremental_copy: bool
+ :param destination_snapshot:
+ :type destination_snapshot: str
+ :param deleted_time:
+ :type deleted_time: ~datetime.datetime
+ :param remaining_retention_days:
+ :type remaining_retention_days: int
+ :param access_tier_inferred:
+ :type access_tier_inferred: bool
+ :param customer_provided_key_sha256:
+ :type customer_provided_key_sha256: str
+ :param encryption_scope: The name of the encryption scope under which the blob is encrypted.
+ :type encryption_scope: str
+ :param access_tier_change_time:
+ :type access_tier_change_time: ~datetime.datetime
+ :param tag_count:
+ :type tag_count: int
+ :param expires_on:
+ :type expires_on: ~datetime.datetime
+ :param is_sealed:
+ :type is_sealed: bool
+ :param last_accessed_on:
+ :type last_accessed_on: ~datetime.datetime
+ :param delete_time:
+ :type delete_time: ~datetime.datetime
+ """
+
+ _validation = {
+ 'last_modified': {'required': True},
+ 'etag': {'required': True},
+ }
+
+ _attribute_map = {
+ 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'},
+ 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'},
+ 'etag': {'key': 'Etag', 'type': 'str'},
+ 'content_length': {'key': 'Content-Length', 'type': 'long'},
+ 'content_type': {'key': 'Content-Type', 'type': 'str'},
+ 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'},
+ 'content_language': {'key': 'Content-Language', 'type': 'str'},
+ 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'},
+ 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'},
+ 'cache_control': {'key': 'Cache-Control', 'type': 'str'},
+ 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'},
+ 'copy_id': {'key': 'CopyId', 'type': 'str'},
+ 'copy_source': {'key': 'CopySource', 'type': 'str'},
+ 'copy_progress': {'key': 'CopyProgress', 'type': 'str'},
+ 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'},
+ 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'},
+ 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'},
+ 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'},
+ 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'},
+ 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'},
+ 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'},
+ 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'},
+ 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'},
+ 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'},
+ 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'},
+ 'tag_count': {'key': 'TagCount', 'type': 'int'},
+ 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'},
+ 'is_sealed': {'key': 'Sealed', 'type': 'bool'},
+ 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'},
+ 'delete_time': {'key': 'DeleteTime', 'type': 'rfc-1123'},
+ }
+ _xml_map = {
+ 'name': 'Properties'
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(BlobPropertiesInternal, self).__init__(**kwargs)
+ self.creation_time = kwargs.get('creation_time', None)
+ self.last_modified = kwargs['last_modified']
+ self.etag = kwargs['etag']
+ self.content_length = kwargs.get('content_length', None)
+ self.content_type = kwargs.get('content_type', None)
+ self.content_encoding = kwargs.get('content_encoding', None)
+ self.content_language = kwargs.get('content_language', None)
+ self.content_md5 = kwargs.get('content_md5', None)
+ self.content_disposition = kwargs.get('content_disposition', None)
+ self.cache_control = kwargs.get('cache_control', None)
+ self.blob_sequence_number = kwargs.get('blob_sequence_number', None)
+ self.copy_id = kwargs.get('copy_id', None)
+ self.copy_source = kwargs.get('copy_source', None)
+ self.copy_progress = kwargs.get('copy_progress', None)
+ self.copy_completion_time = kwargs.get('copy_completion_time', None)
+ self.copy_status_description = kwargs.get('copy_status_description', None)
+ self.server_encrypted = kwargs.get('server_encrypted', None)
+ self.incremental_copy = kwargs.get('incremental_copy', None)
+ self.destination_snapshot = kwargs.get('destination_snapshot', None)
+ self.deleted_time = kwargs.get('deleted_time', None)
+ self.remaining_retention_days = kwargs.get('remaining_retention_days', None)
+ self.access_tier_inferred = kwargs.get('access_tier_inferred', None)
+ self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None)
+ self.encryption_scope = kwargs.get('encryption_scope', None)
+ self.access_tier_change_time = kwargs.get('access_tier_change_time', None)
+ self.tag_count = kwargs.get('tag_count', None)
+ self.expires_on = kwargs.get('expires_on', None)
+ self.is_sealed = kwargs.get('is_sealed', None)
+ self.last_accessed_on = kwargs.get('last_accessed_on', None)
+ self.delete_time = kwargs.get('delete_time', None)
+
+
+class FileSystem(msrest.serialization.Model):
+ """FileSystem.
+
+ :param name:
+ :type name: str
+ :param last_modified:
+ :type last_modified: str
+ :param e_tag:
+ :type e_tag: str
+ """
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'last_modified': {'key': 'lastModified', 'type': 'str'},
+ 'e_tag': {'key': 'eTag', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(FileSystem, self).__init__(**kwargs)
+ self.name = kwargs.get('name', None)
+ self.last_modified = kwargs.get('last_modified', None)
+ self.e_tag = kwargs.get('e_tag', None)
+
+
+class FileSystemList(msrest.serialization.Model):
+ """FileSystemList.
+
+ :param filesystems:
+ :type filesystems: list[~azure.storage.filedatalake.models.FileSystem]
+ """
+
+ _attribute_map = {
+ 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(FileSystemList, self).__init__(**kwargs)
+ self.filesystems = kwargs.get('filesystems', None)
+
+
+class LeaseAccessConditions(msrest.serialization.Model):
+ """Parameter group.
+
+ :param lease_id: If specified, the operation only succeeds if the resource's lease is active
+ and matches this ID.
+ :type lease_id: str
+ """
+
+ _attribute_map = {
+ 'lease_id': {'key': 'leaseId', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(LeaseAccessConditions, self).__init__(**kwargs)
+ self.lease_id = kwargs.get('lease_id', None)
+
+
+class ListBlobsHierarchySegmentResponse(msrest.serialization.Model):
+ """An enumeration of blobs.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param service_endpoint: Required.
+ :type service_endpoint: str
+ :param container_name: Required.
+ :type container_name: str
+ :param prefix:
+ :type prefix: str
+ :param marker:
+ :type marker: str
+ :param max_results:
+ :type max_results: int
+ :param delimiter:
+ :type delimiter: str
+ :param segment: Required.
+ :type segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment
+ :param next_marker:
+ :type next_marker: str
+ """
+
+ _validation = {
+ 'service_endpoint': {'required': True},
+ 'container_name': {'required': True},
+ 'segment': {'required': True},
+ }
+
+ _attribute_map = {
+ 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}},
+ 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}},
+ 'prefix': {'key': 'Prefix', 'type': 'str'},
+ 'marker': {'key': 'Marker', 'type': 'str'},
+ 'max_results': {'key': 'MaxResults', 'type': 'int'},
+ 'delimiter': {'key': 'Delimiter', 'type': 'str'},
+ 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'},
+ 'next_marker': {'key': 'NextMarker', 'type': 'str'},
+ }
+ _xml_map = {
+ 'name': 'EnumerationResults'
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs)
+ self.service_endpoint = kwargs['service_endpoint']
+ self.container_name = kwargs['container_name']
+ self.prefix = kwargs.get('prefix', None)
+ self.marker = kwargs.get('marker', None)
+ self.max_results = kwargs.get('max_results', None)
+ self.delimiter = kwargs.get('delimiter', None)
+ self.segment = kwargs['segment']
+ self.next_marker = kwargs.get('next_marker', None)
+
+
+class ModifiedAccessConditions(msrest.serialization.Model):
+ """Parameter group.
+
+ :param if_modified_since: Specify this header value to operate only on a blob if it has been
+ modified since the specified date/time.
+ :type if_modified_since: ~datetime.datetime
+ :param if_unmodified_since: Specify this header value to operate only on a blob if it has not
+ been modified since the specified date/time.
+ :type if_unmodified_since: ~datetime.datetime
+ :param if_match: Specify an ETag value to operate only on blobs with a matching value.
+ :type if_match: str
+ :param if_none_match: Specify an ETag value to operate only on blobs without a matching value.
+ :type if_none_match: str
+ """
+
+ _attribute_map = {
+ 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'},
+ 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'},
+ 'if_match': {'key': 'ifMatch', 'type': 'str'},
+ 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(ModifiedAccessConditions, self).__init__(**kwargs)
+ self.if_modified_since = kwargs.get('if_modified_since', None)
+ self.if_unmodified_since = kwargs.get('if_unmodified_since', None)
+ self.if_match = kwargs.get('if_match', None)
+ self.if_none_match = kwargs.get('if_none_match', None)
+
+
+class Path(msrest.serialization.Model):
+ """Path.
+
+ :param name:
+ :type name: str
+ :param is_directory:
+ :type is_directory: bool
+ :param last_modified:
+ :type last_modified: str
+ :param e_tag:
+ :type e_tag: str
+ :param content_length:
+ :type content_length: long
+ :param owner:
+ :type owner: str
+ :param group:
+ :type group: str
+ :param permissions:
+ :type permissions: str
+ """
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'is_directory': {'key': 'isDirectory', 'type': 'bool'},
+ 'last_modified': {'key': 'lastModified', 'type': 'str'},
+ 'e_tag': {'key': 'eTag', 'type': 'str'},
+ 'content_length': {'key': 'contentLength', 'type': 'long'},
+ 'owner': {'key': 'owner', 'type': 'str'},
+ 'group': {'key': 'group', 'type': 'str'},
+ 'permissions': {'key': 'permissions', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(Path, self).__init__(**kwargs)
+ self.name = kwargs.get('name', None)
+ self.is_directory = kwargs.get('is_directory', False)
+ self.last_modified = kwargs.get('last_modified', None)
+ self.e_tag = kwargs.get('e_tag', None)
+ self.content_length = kwargs.get('content_length', None)
+ self.owner = kwargs.get('owner', None)
+ self.group = kwargs.get('group', None)
+ self.permissions = kwargs.get('permissions', None)
+
+
+class PathHTTPHeaders(msrest.serialization.Model):
+ """Parameter group.
+
+ :param cache_control: Optional. Sets the blob's cache control. If specified, this property is
+ stored with the blob and returned with a read request.
+ :type cache_control: str
+ :param content_encoding: Optional. Sets the blob's content encoding. If specified, this
+ property is stored with the blob and returned with a read request.
+ :type content_encoding: str
+ :param content_language: Optional. Sets the blob's content language. If specified, this property
+ is stored with the blob and returned with a read request.
+ :type content_language: str
+ :param content_disposition: Optional. Sets the blob's Content-Disposition header.
+ :type content_disposition: str
+ :param content_type: Optional. Sets the blob's content type. If specified, this property is
+ stored with the blob and returned with a read request.
+ :type content_type: str
+ :param content_md5: Specify the transactional md5 for the body, to be validated by the service.
+ :type content_md5: bytearray
+ :param transactional_content_hash: Specify the transactional md5 for the body, to be validated
+ by the service.
+ :type transactional_content_hash: bytearray
+ """
+
+ _attribute_map = {
+ 'cache_control': {'key': 'cacheControl', 'type': 'str'},
+ 'content_encoding': {'key': 'contentEncoding', 'type': 'str'},
+ 'content_language': {'key': 'contentLanguage', 'type': 'str'},
+ 'content_disposition': {'key': 'contentDisposition', 'type': 'str'},
+ 'content_type': {'key': 'contentType', 'type': 'str'},
+ 'content_md5': {'key': 'contentMD5', 'type': 'bytearray'},
+ 'transactional_content_hash': {'key': 'transactionalContentHash', 'type': 'bytearray'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(PathHTTPHeaders, self).__init__(**kwargs)
+ self.cache_control = kwargs.get('cache_control', None)
+ self.content_encoding = kwargs.get('content_encoding', None)
+ self.content_language = kwargs.get('content_language', None)
+ self.content_disposition = kwargs.get('content_disposition', None)
+ self.content_type = kwargs.get('content_type', None)
+ self.content_md5 = kwargs.get('content_md5', None)
+ self.transactional_content_hash = kwargs.get('transactional_content_hash', None)
+
+
+class PathList(msrest.serialization.Model):
+ """PathList.
+
+ :param paths:
+ :type paths: list[~azure.storage.filedatalake.models.Path]
+ """
+
+ _attribute_map = {
+ 'paths': {'key': 'paths', 'type': '[Path]'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(PathList, self).__init__(**kwargs)
+ self.paths = kwargs.get('paths', None)
+
+
+class SetAccessControlRecursiveResponse(msrest.serialization.Model):
+ """SetAccessControlRecursiveResponse.
+
+ :param directories_successful:
+ :type directories_successful: int
+ :param files_successful:
+ :type files_successful: int
+ :param failure_count:
+ :type failure_count: int
+ :param failed_entries:
+ :type failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry]
+ """
+
+ _attribute_map = {
+ 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'},
+ 'files_successful': {'key': 'filesSuccessful', 'type': 'int'},
+ 'failure_count': {'key': 'failureCount', 'type': 'int'},
+ 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(SetAccessControlRecursiveResponse, self).__init__(**kwargs)
+ self.directories_successful = kwargs.get('directories_successful', None)
+ self.files_successful = kwargs.get('files_successful', None)
+ self.failure_count = kwargs.get('failure_count', None)
+ self.failed_entries = kwargs.get('failed_entries', None)
+
+
+class SourceModifiedAccessConditions(msrest.serialization.Model):
+ """Parameter group.
+
+ :param source_if_match: Specify an ETag value to operate only on blobs with a matching value.
+ :type source_if_match: str
+ :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching
+ value.
+ :type source_if_none_match: str
+ :param source_if_modified_since: Specify this header value to operate only on a blob if it has
+ been modified since the specified date/time.
+ :type source_if_modified_since: ~datetime.datetime
+ :param source_if_unmodified_since: Specify this header value to operate only on a blob if it
+ has not been modified since the specified date/time.
+ :type source_if_unmodified_since: ~datetime.datetime
+ """
+
+ _attribute_map = {
+ 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'},
+ 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'},
+ 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'},
+ 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(SourceModifiedAccessConditions, self).__init__(**kwargs)
+ self.source_if_match = kwargs.get('source_if_match', None)
+ self.source_if_none_match = kwargs.get('source_if_none_match', None)
+ self.source_if_modified_since = kwargs.get('source_if_modified_since', None)
+ self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None)
+
+
+class StorageError(msrest.serialization.Model):
+ """StorageError.
+
+ :param error: The service error response object.
+ :type error: ~azure.storage.filedatalake.models.StorageErrorError
+ """
+
+ _attribute_map = {
+ 'error': {'key': 'error', 'type': 'StorageErrorError'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(StorageError, self).__init__(**kwargs)
+ self.error = kwargs.get('error', None)
+
+
+class StorageErrorError(msrest.serialization.Model):
+ """The service error response object.
+
+ :param code: The service error code.
+ :type code: str
+ :param message: The service error message.
+ :type message: str
+ """
+
+ _attribute_map = {
+ 'code': {'key': 'Code', 'type': 'str'},
+ 'message': {'key': 'Message', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(StorageErrorError, self).__init__(**kwargs)
+ self.code = kwargs.get('code', None)
+ self.message = kwargs.get('message', None)
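
These kwargs-based models are normally produced by the generated XML deserializer when a deleted-path listing response is read back; they are rarely built by hand. As a minimal sketch only, assuming the `models` package `__init__` re-exports these classes in the usual AutoRest layout (that file is not shown in this hunk) and using made-up values:

```
# Sketch only: exercises the kwargs constructors defined above.
# The import path mirrors the vendored package added in this diff; all values
# are illustrative and not taken from any real account.
import datetime

from azext_storage_preview.vendored_sdks.azure_storage_filedatalake.v2020_06_12._generated import models

properties = models.BlobPropertiesInternal(
    last_modified=datetime.datetime(2021, 5, 20, tzinfo=datetime.timezone.utc),
    etag='"0x8D91B266"',
    deleted_time=datetime.datetime(2021, 5, 21, tzinfo=datetime.timezone.utc),
    remaining_retention_days=4,
)
deleted_item = models.BlobItemInternal(
    name="dir1/test.txt",          # path of the soft-deleted entry
    deleted=True,
    snapshot="",
    properties=properties,
    deletion_id="132651757898765",  # illustrative deletion id
)
segment = models.BlobHierarchyListSegment(blob_items=[deleted_item])
```
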
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/models/_models_py3.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/models/_models_py3.py
new file mode 100644
index 00000000000..bbe361c5b9b
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/models/_models_py3.py
@@ -0,0 +1,779 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+import datetime
+from typing import List, Optional
+
+from azure.core.exceptions import HttpResponseError
+import msrest.serialization
+
+
+class AclFailedEntry(msrest.serialization.Model):
+ """AclFailedEntry.
+
+ :param name:
+ :type name: str
+ :param type:
+ :type type: str
+ :param error_message:
+ :type error_message: str
+ """
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'type': {'key': 'type', 'type': 'str'},
+ 'error_message': {'key': 'errorMessage', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ name: Optional[str] = None,
+ type: Optional[str] = None,
+ error_message: Optional[str] = None,
+ **kwargs
+ ):
+ super(AclFailedEntry, self).__init__(**kwargs)
+ self.name = name
+ self.type = type
+ self.error_message = error_message
+
+
+class BlobHierarchyListSegment(msrest.serialization.Model):
+ """BlobHierarchyListSegment.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param blob_prefixes:
+ :type blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix]
+ :param blob_items: Required.
+ :type blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal]
+ """
+
+ _validation = {
+ 'blob_items': {'required': True},
+ }
+
+ _attribute_map = {
+ 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]'},
+ 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'},
+ }
+ _xml_map = {
+ 'name': 'Blobs'
+ }
+
+ def __init__(
+ self,
+ *,
+ blob_items: List["BlobItemInternal"],
+ blob_prefixes: Optional[List["BlobPrefix"]] = None,
+ **kwargs
+ ):
+ super(BlobHierarchyListSegment, self).__init__(**kwargs)
+ self.blob_prefixes = blob_prefixes
+ self.blob_items = blob_items
+
+
+class BlobItemInternal(msrest.serialization.Model):
+ """An Azure Storage blob.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required.
+ :type name: str
+ :param deleted: Required.
+ :type deleted: bool
+ :param snapshot: Required.
+ :type snapshot: str
+ :param version_id:
+ :type version_id: str
+ :param is_current_version:
+ :type is_current_version: bool
+ :param properties: Required. Properties of a blob.
+ :type properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal
+ :param deletion_id:
+ :type deletion_id: str
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ 'deleted': {'required': True},
+ 'snapshot': {'required': True},
+ 'properties': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str'},
+ 'deleted': {'key': 'Deleted', 'type': 'bool'},
+ 'snapshot': {'key': 'Snapshot', 'type': 'str'},
+ 'version_id': {'key': 'VersionId', 'type': 'str'},
+ 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'},
+ 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'},
+ 'deletion_id': {'key': 'DeletionId', 'type': 'str'},
+ }
+ _xml_map = {
+ 'name': 'Blob'
+ }
+
+ def __init__(
+ self,
+ *,
+ name: str,
+ deleted: bool,
+ snapshot: str,
+ properties: "BlobPropertiesInternal",
+ version_id: Optional[str] = None,
+ is_current_version: Optional[bool] = None,
+ deletion_id: Optional[str] = None,
+ **kwargs
+ ):
+ super(BlobItemInternal, self).__init__(**kwargs)
+ self.name = name
+ self.deleted = deleted
+ self.snapshot = snapshot
+ self.version_id = version_id
+ self.is_current_version = is_current_version
+ self.properties = properties
+ self.deletion_id = deletion_id
+
+
+class BlobPrefix(msrest.serialization.Model):
+ """BlobPrefix.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required.
+ :type name: str
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ name: str,
+ **kwargs
+ ):
+ super(BlobPrefix, self).__init__(**kwargs)
+ self.name = name
+
+
+class BlobPropertiesInternal(msrest.serialization.Model):
+ """Properties of a blob.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param creation_time:
+ :type creation_time: ~datetime.datetime
+ :param last_modified: Required.
+ :type last_modified: ~datetime.datetime
+ :param etag: Required.
+ :type etag: str
+ :param content_length: Size in bytes.
+ :type content_length: long
+ :param content_type:
+ :type content_type: str
+ :param content_encoding:
+ :type content_encoding: str
+ :param content_language:
+ :type content_language: str
+ :param content_md5:
+ :type content_md5: bytearray
+ :param content_disposition:
+ :type content_disposition: str
+ :param cache_control:
+ :type cache_control: str
+ :param blob_sequence_number:
+ :type blob_sequence_number: long
+ :param copy_id:
+ :type copy_id: str
+ :param copy_source:
+ :type copy_source: str
+ :param copy_progress:
+ :type copy_progress: str
+ :param copy_completion_time:
+ :type copy_completion_time: ~datetime.datetime
+ :param copy_status_description:
+ :type copy_status_description: str
+ :param server_encrypted:
+ :type server_encrypted: bool
+ :param incremental_copy:
+ :type incremental_copy: bool
+ :param destination_snapshot:
+ :type destination_snapshot: str
+ :param deleted_time:
+ :type deleted_time: ~datetime.datetime
+ :param remaining_retention_days:
+ :type remaining_retention_days: int
+ :param access_tier_inferred:
+ :type access_tier_inferred: bool
+ :param customer_provided_key_sha256:
+ :type customer_provided_key_sha256: str
+ :param encryption_scope: The name of the encryption scope under which the blob is encrypted.
+ :type encryption_scope: str
+ :param access_tier_change_time:
+ :type access_tier_change_time: ~datetime.datetime
+ :param tag_count:
+ :type tag_count: int
+ :param expires_on:
+ :type expires_on: ~datetime.datetime
+ :param is_sealed:
+ :type is_sealed: bool
+ :param last_accessed_on:
+ :type last_accessed_on: ~datetime.datetime
+ :param delete_time:
+ :type delete_time: ~datetime.datetime
+ """
+
+ _validation = {
+ 'last_modified': {'required': True},
+ 'etag': {'required': True},
+ }
+
+ _attribute_map = {
+ 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'},
+ 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'},
+ 'etag': {'key': 'Etag', 'type': 'str'},
+ 'content_length': {'key': 'Content-Length', 'type': 'long'},
+ 'content_type': {'key': 'Content-Type', 'type': 'str'},
+ 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'},
+ 'content_language': {'key': 'Content-Language', 'type': 'str'},
+ 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'},
+ 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'},
+ 'cache_control': {'key': 'Cache-Control', 'type': 'str'},
+ 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'},
+ 'copy_id': {'key': 'CopyId', 'type': 'str'},
+ 'copy_source': {'key': 'CopySource', 'type': 'str'},
+ 'copy_progress': {'key': 'CopyProgress', 'type': 'str'},
+ 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'},
+ 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'},
+ 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'},
+ 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'},
+ 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'},
+ 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'},
+ 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'},
+ 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'},
+ 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'},
+ 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'},
+ 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'},
+ 'tag_count': {'key': 'TagCount', 'type': 'int'},
+ 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'},
+ 'is_sealed': {'key': 'Sealed', 'type': 'bool'},
+ 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'},
+ 'delete_time': {'key': 'DeleteTime', 'type': 'rfc-1123'},
+ }
+ _xml_map = {
+ 'name': 'Properties'
+ }
+
+ def __init__(
+ self,
+ *,
+ last_modified: datetime.datetime,
+ etag: str,
+ creation_time: Optional[datetime.datetime] = None,
+ content_length: Optional[int] = None,
+ content_type: Optional[str] = None,
+ content_encoding: Optional[str] = None,
+ content_language: Optional[str] = None,
+ content_md5: Optional[bytearray] = None,
+ content_disposition: Optional[str] = None,
+ cache_control: Optional[str] = None,
+ blob_sequence_number: Optional[int] = None,
+ copy_id: Optional[str] = None,
+ copy_source: Optional[str] = None,
+ copy_progress: Optional[str] = None,
+ copy_completion_time: Optional[datetime.datetime] = None,
+ copy_status_description: Optional[str] = None,
+ server_encrypted: Optional[bool] = None,
+ incremental_copy: Optional[bool] = None,
+ destination_snapshot: Optional[str] = None,
+ deleted_time: Optional[datetime.datetime] = None,
+ remaining_retention_days: Optional[int] = None,
+ access_tier_inferred: Optional[bool] = None,
+ customer_provided_key_sha256: Optional[str] = None,
+ encryption_scope: Optional[str] = None,
+ access_tier_change_time: Optional[datetime.datetime] = None,
+ tag_count: Optional[int] = None,
+ expires_on: Optional[datetime.datetime] = None,
+ is_sealed: Optional[bool] = None,
+ last_accessed_on: Optional[datetime.datetime] = None,
+ delete_time: Optional[datetime.datetime] = None,
+ **kwargs
+ ):
+ super(BlobPropertiesInternal, self).__init__(**kwargs)
+ self.creation_time = creation_time
+ self.last_modified = last_modified
+ self.etag = etag
+ self.content_length = content_length
+ self.content_type = content_type
+ self.content_encoding = content_encoding
+ self.content_language = content_language
+ self.content_md5 = content_md5
+ self.content_disposition = content_disposition
+ self.cache_control = cache_control
+ self.blob_sequence_number = blob_sequence_number
+ self.copy_id = copy_id
+ self.copy_source = copy_source
+ self.copy_progress = copy_progress
+ self.copy_completion_time = copy_completion_time
+ self.copy_status_description = copy_status_description
+ self.server_encrypted = server_encrypted
+ self.incremental_copy = incremental_copy
+ self.destination_snapshot = destination_snapshot
+ self.deleted_time = deleted_time
+ self.remaining_retention_days = remaining_retention_days
+ self.access_tier_inferred = access_tier_inferred
+ self.customer_provided_key_sha256 = customer_provided_key_sha256
+ self.encryption_scope = encryption_scope
+ self.access_tier_change_time = access_tier_change_time
+ self.tag_count = tag_count
+ self.expires_on = expires_on
+ self.is_sealed = is_sealed
+ self.last_accessed_on = last_accessed_on
+ self.delete_time = delete_time
+
+
+class FileSystem(msrest.serialization.Model):
+ """FileSystem.
+
+ :param name:
+ :type name: str
+ :param last_modified:
+ :type last_modified: str
+ :param e_tag:
+ :type e_tag: str
+ """
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'last_modified': {'key': 'lastModified', 'type': 'str'},
+ 'e_tag': {'key': 'eTag', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ name: Optional[str] = None,
+ last_modified: Optional[str] = None,
+ e_tag: Optional[str] = None,
+ **kwargs
+ ):
+ super(FileSystem, self).__init__(**kwargs)
+ self.name = name
+ self.last_modified = last_modified
+ self.e_tag = e_tag
+
+
+class FileSystemList(msrest.serialization.Model):
+ """FileSystemList.
+
+ :param filesystems:
+ :type filesystems: list[~azure.storage.filedatalake.models.FileSystem]
+ """
+
+ _attribute_map = {
+ 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'},
+ }
+
+ def __init__(
+ self,
+ *,
+ filesystems: Optional[List["FileSystem"]] = None,
+ **kwargs
+ ):
+ super(FileSystemList, self).__init__(**kwargs)
+ self.filesystems = filesystems
+
+
+class LeaseAccessConditions(msrest.serialization.Model):
+ """Parameter group.
+
+ :param lease_id: If specified, the operation only succeeds if the resource's lease is active
+ and matches this ID.
+ :type lease_id: str
+ """
+
+ _attribute_map = {
+ 'lease_id': {'key': 'leaseId', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ lease_id: Optional[str] = None,
+ **kwargs
+ ):
+ super(LeaseAccessConditions, self).__init__(**kwargs)
+ self.lease_id = lease_id
+
+
+class ListBlobsHierarchySegmentResponse(msrest.serialization.Model):
+ """An enumeration of blobs.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param service_endpoint: Required.
+ :type service_endpoint: str
+ :param container_name: Required.
+ :type container_name: str
+ :param prefix:
+ :type prefix: str
+ :param marker:
+ :type marker: str
+ :param max_results:
+ :type max_results: int
+ :param delimiter:
+ :type delimiter: str
+ :param segment: Required.
+ :type segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment
+ :param next_marker:
+ :type next_marker: str
+ """
+
+ _validation = {
+ 'service_endpoint': {'required': True},
+ 'container_name': {'required': True},
+ 'segment': {'required': True},
+ }
+
+ _attribute_map = {
+ 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}},
+ 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}},
+ 'prefix': {'key': 'Prefix', 'type': 'str'},
+ 'marker': {'key': 'Marker', 'type': 'str'},
+ 'max_results': {'key': 'MaxResults', 'type': 'int'},
+ 'delimiter': {'key': 'Delimiter', 'type': 'str'},
+ 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'},
+ 'next_marker': {'key': 'NextMarker', 'type': 'str'},
+ }
+ _xml_map = {
+ 'name': 'EnumerationResults'
+ }
+
+ def __init__(
+ self,
+ *,
+ service_endpoint: str,
+ container_name: str,
+ segment: "BlobHierarchyListSegment",
+ prefix: Optional[str] = None,
+ marker: Optional[str] = None,
+ max_results: Optional[int] = None,
+ delimiter: Optional[str] = None,
+ next_marker: Optional[str] = None,
+ **kwargs
+ ):
+ super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs)
+ self.service_endpoint = service_endpoint
+ self.container_name = container_name
+ self.prefix = prefix
+ self.marker = marker
+ self.max_results = max_results
+ self.delimiter = delimiter
+ self.segment = segment
+ self.next_marker = next_marker
+
+
+class ModifiedAccessConditions(msrest.serialization.Model):
+ """Parameter group.
+
+ :param if_modified_since: Specify this header value to operate only on a blob if it has been
+ modified since the specified date/time.
+ :type if_modified_since: ~datetime.datetime
+ :param if_unmodified_since: Specify this header value to operate only on a blob if it has not
+ been modified since the specified date/time.
+ :type if_unmodified_since: ~datetime.datetime
+ :param if_match: Specify an ETag value to operate only on blobs with a matching value.
+ :type if_match: str
+ :param if_none_match: Specify an ETag value to operate only on blobs without a matching value.
+ :type if_none_match: str
+ """
+
+ _attribute_map = {
+ 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'},
+ 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'},
+ 'if_match': {'key': 'ifMatch', 'type': 'str'},
+ 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ if_modified_since: Optional[datetime.datetime] = None,
+ if_unmodified_since: Optional[datetime.datetime] = None,
+ if_match: Optional[str] = None,
+ if_none_match: Optional[str] = None,
+ **kwargs
+ ):
+ super(ModifiedAccessConditions, self).__init__(**kwargs)
+ self.if_modified_since = if_modified_since
+ self.if_unmodified_since = if_unmodified_since
+ self.if_match = if_match
+ self.if_none_match = if_none_match
+
+
+class Path(msrest.serialization.Model):
+ """Path.
+
+ :param name:
+ :type name: str
+ :param is_directory:
+ :type is_directory: bool
+ :param last_modified:
+ :type last_modified: str
+ :param e_tag:
+ :type e_tag: str
+ :param content_length:
+ :type content_length: long
+ :param owner:
+ :type owner: str
+ :param group:
+ :type group: str
+ :param permissions:
+ :type permissions: str
+ """
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'is_directory': {'key': 'isDirectory', 'type': 'bool'},
+ 'last_modified': {'key': 'lastModified', 'type': 'str'},
+ 'e_tag': {'key': 'eTag', 'type': 'str'},
+ 'content_length': {'key': 'contentLength', 'type': 'long'},
+ 'owner': {'key': 'owner', 'type': 'str'},
+ 'group': {'key': 'group', 'type': 'str'},
+ 'permissions': {'key': 'permissions', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ name: Optional[str] = None,
+ is_directory: Optional[bool] = False,
+ last_modified: Optional[str] = None,
+ e_tag: Optional[str] = None,
+ content_length: Optional[int] = None,
+ owner: Optional[str] = None,
+ group: Optional[str] = None,
+ permissions: Optional[str] = None,
+ **kwargs
+ ):
+ super(Path, self).__init__(**kwargs)
+ self.name = name
+ self.is_directory = is_directory
+ self.last_modified = last_modified
+ self.e_tag = e_tag
+ self.content_length = content_length
+ self.owner = owner
+ self.group = group
+ self.permissions = permissions
+
+
+class PathHTTPHeaders(msrest.serialization.Model):
+ """Parameter group.
+
+ :param cache_control: Optional. Sets the blob's cache control. If specified, this property is
+ stored with the blob and returned with a read request.
+ :type cache_control: str
+ :param content_encoding: Optional. Sets the blob's content encoding. If specified, this
+ property is stored with the blob and returned with a read request.
+ :type content_encoding: str
+ :param content_language: Optional. Sets the blob's content language. If specified, this property
+ is stored with the blob and returned with a read request.
+ :type content_language: str
+ :param content_disposition: Optional. Sets the blob's Content-Disposition header.
+ :type content_disposition: str
+ :param content_type: Optional. Sets the blob's content type. If specified, this property is
+ stored with the blob and returned with a read request.
+ :type content_type: str
+ :param content_md5: Specify the transactional md5 for the body, to be validated by the service.
+ :type content_md5: bytearray
+ :param transactional_content_hash: Specify the transactional md5 for the body, to be validated
+ by the service.
+ :type transactional_content_hash: bytearray
+ """
+
+ _attribute_map = {
+ 'cache_control': {'key': 'cacheControl', 'type': 'str'},
+ 'content_encoding': {'key': 'contentEncoding', 'type': 'str'},
+ 'content_language': {'key': 'contentLanguage', 'type': 'str'},
+ 'content_disposition': {'key': 'contentDisposition', 'type': 'str'},
+ 'content_type': {'key': 'contentType', 'type': 'str'},
+ 'content_md5': {'key': 'contentMD5', 'type': 'bytearray'},
+ 'transactional_content_hash': {'key': 'transactionalContentHash', 'type': 'bytearray'},
+ }
+
+ def __init__(
+ self,
+ *,
+ cache_control: Optional[str] = None,
+ content_encoding: Optional[str] = None,
+ content_language: Optional[str] = None,
+ content_disposition: Optional[str] = None,
+ content_type: Optional[str] = None,
+ content_md5: Optional[bytearray] = None,
+ transactional_content_hash: Optional[bytearray] = None,
+ **kwargs
+ ):
+ super(PathHTTPHeaders, self).__init__(**kwargs)
+ self.cache_control = cache_control
+ self.content_encoding = content_encoding
+ self.content_language = content_language
+ self.content_disposition = content_disposition
+ self.content_type = content_type
+ self.content_md5 = content_md5
+ self.transactional_content_hash = transactional_content_hash
+
+
+class PathList(msrest.serialization.Model):
+ """PathList.
+
+ :param paths:
+ :type paths: list[~azure.storage.filedatalake.models.Path]
+ """
+
+ _attribute_map = {
+ 'paths': {'key': 'paths', 'type': '[Path]'},
+ }
+
+ def __init__(
+ self,
+ *,
+ paths: Optional[List["Path"]] = None,
+ **kwargs
+ ):
+ super(PathList, self).__init__(**kwargs)
+ self.paths = paths
+
+
+class SetAccessControlRecursiveResponse(msrest.serialization.Model):
+ """SetAccessControlRecursiveResponse.
+
+ :param directories_successful:
+ :type directories_successful: int
+ :param files_successful:
+ :type files_successful: int
+ :param failure_count:
+ :type failure_count: int
+ :param failed_entries:
+ :type failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry]
+ """
+
+ _attribute_map = {
+ 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'},
+ 'files_successful': {'key': 'filesSuccessful', 'type': 'int'},
+ 'failure_count': {'key': 'failureCount', 'type': 'int'},
+ 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'},
+ }
+
+ def __init__(
+ self,
+ *,
+ directories_successful: Optional[int] = None,
+ files_successful: Optional[int] = None,
+ failure_count: Optional[int] = None,
+ failed_entries: Optional[List["AclFailedEntry"]] = None,
+ **kwargs
+ ):
+ super(SetAccessControlRecursiveResponse, self).__init__(**kwargs)
+ self.directories_successful = directories_successful
+ self.files_successful = files_successful
+ self.failure_count = failure_count
+ self.failed_entries = failed_entries
+
+
+class SourceModifiedAccessConditions(msrest.serialization.Model):
+ """Parameter group.
+
+ :param source_if_match: Specify an ETag value to operate only on blobs with a matching value.
+ :type source_if_match: str
+ :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching
+ value.
+ :type source_if_none_match: str
+ :param source_if_modified_since: Specify this header value to operate only on a blob if it has
+ been modified since the specified date/time.
+ :type source_if_modified_since: ~datetime.datetime
+ :param source_if_unmodified_since: Specify this header value to operate only on a blob if it
+ has not been modified since the specified date/time.
+ :type source_if_unmodified_since: ~datetime.datetime
+ """
+
+ _attribute_map = {
+ 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'},
+ 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'},
+ 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'},
+ 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'},
+ }
+
+ def __init__(
+ self,
+ *,
+ source_if_match: Optional[str] = None,
+ source_if_none_match: Optional[str] = None,
+ source_if_modified_since: Optional[datetime.datetime] = None,
+ source_if_unmodified_since: Optional[datetime.datetime] = None,
+ **kwargs
+ ):
+ super(SourceModifiedAccessConditions, self).__init__(**kwargs)
+ self.source_if_match = source_if_match
+ self.source_if_none_match = source_if_none_match
+ self.source_if_modified_since = source_if_modified_since
+ self.source_if_unmodified_since = source_if_unmodified_since
+
+
+class StorageError(msrest.serialization.Model):
+ """StorageError.
+
+ :param error: The service error response object.
+ :type error: ~azure.storage.filedatalake.models.StorageErrorError
+ """
+
+ _attribute_map = {
+ 'error': {'key': 'error', 'type': 'StorageErrorError'},
+ }
+
+ def __init__(
+ self,
+ *,
+ error: Optional["StorageErrorError"] = None,
+ **kwargs
+ ):
+ super(StorageError, self).__init__(**kwargs)
+ self.error = error
+
+
+class StorageErrorError(msrest.serialization.Model):
+ """The service error response object.
+
+ :param code: The service error code.
+ :type code: str
+ :param message: The service error message.
+ :type message: str
+ """
+
+ _attribute_map = {
+ 'code': {'key': 'Code', 'type': 'str'},
+ 'message': {'key': 'Message', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ code: Optional[str] = None,
+ message: Optional[str] = None,
+ **kwargs
+ ):
+ super(StorageErrorError, self).__init__(**kwargs)
+ self.code = code
+ self.message = message
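
The keyword-only, type-annotated constructors above make required fields explicit in the signature, whereas in the kwargs-based variants earlier in this diff a missing required value only surfaces as a `KeyError` inside `__init__`. A small sketch of that difference, again assuming the standard AutoRest `models` package dispatch to the Python 3 module (not shown in this section):

```
# Sketch only: a missing required keyword argument fails at call time with the
# Python 3 models defined above.
from azext_storage_preview.vendored_sdks.azure_storage_filedatalake.v2020_06_12._generated import models

try:
    models.BlobPrefix()  # 'name' is required and keyword-only
except TypeError as err:
    print(err)  # e.g. "missing 1 required keyword-only argument: 'name'"

prefix = models.BlobPrefix(name="dir1/")
print(prefix.name)
```
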
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/operations/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/operations/__init__.py
new file mode 100644
index 00000000000..0db71e00342
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/operations/__init__.py
@@ -0,0 +1,17 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from ._service_operations import ServiceOperations
+from ._file_system_operations import FileSystemOperations
+from ._path_operations import PathOperations
+
+__all__ = [
+ 'ServiceOperations',
+ 'FileSystemOperations',
+ 'PathOperations',
+]
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/operations/_file_system_operations.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/operations/_file_system_operations.py
new file mode 100644
index 00000000000..991890ac4e2
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/operations/_file_system_operations.py
@@ -0,0 +1,643 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+from typing import TYPE_CHECKING
+import warnings
+
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpRequest, HttpResponse
+
+from .. import models as _models
+
+if TYPE_CHECKING:
+ # pylint: disable=unused-import,ungrouped-imports
+ from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
+
+ T = TypeVar('T')
+ ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+class FileSystemOperations(object):
+ """FileSystemOperations operations.
+
+ You should not instantiate this class directly. Instead, you should create a Client instance that
+ instantiates it for you and attaches it as an attribute.
+
+ :ivar models: Alias to model classes used in this operation group.
+ :type models: ~azure.storage.filedatalake.models
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ """
+
+ models = _models
+
+ def __init__(self, client, config, serializer, deserializer):
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+ self._config = config
+
+ def create(
+ self,
+ request_id_parameter=None, # type: Optional[str]
+ timeout=None, # type: Optional[int]
+ properties=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Create FileSystem.
+
+ Create a FileSystem rooted at the specified location. If the FileSystem already exists, the
+ operation fails. This operation does not support conditional HTTP requests.
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+ format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+ is a base64 encoded string. Note that the string may only contain ASCII characters in the
+ ISO-8859-1 character set. If the filesystem exists, any properties not included in the list
+ will be removed. All properties are removed if the header is omitted. To merge new and
+ existing properties, first get all existing properties and the current E-Tag, then make a
+ conditional request with the E-Tag and include values for all properties.
+ :type properties: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ accept = "application/json"
+
+ # Construct URL
+ url = self.create.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ create.metadata = {'url': '/{filesystem}'} # type: ignore
+
+ def set_properties(
+ self,
+ request_id_parameter=None, # type: Optional[str]
+ timeout=None, # type: Optional[int]
+ properties=None, # type: Optional[str]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Set FileSystem Properties.
+
+ Set properties for the FileSystem. This operation supports conditional HTTP requests. For
+ more information, see `Specifying Conditional Headers for Blob Service Operations
+ `_.
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+ format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+ is a base64 encoded string. Note that the string may only contain ASCII characters in the
+ ISO-8859-1 character set. If the filesystem exists, any properties not included in the list
+ will be removed. All properties are removed if the header is omitted. To merge new and
+ existing properties, first get all existing properties and the current E-Tag, then make a
+ conditional request with the E-Tag and include values for all properties.
+ :type properties: str
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if modified_access_conditions is not None:
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ accept = "application/json"
+
+ # Construct URL
+ url = self.set_properties.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.patch(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ set_properties.metadata = {'url': '/{filesystem}'} # type: ignore
+
+ def get_properties(
+ self,
+ request_id_parameter=None, # type: Optional[str]
+ timeout=None, # type: Optional[int]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Get FileSystem Properties.
+
+ All system and user-defined filesystem properties are specified in the response headers.
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ accept = "application/json"
+
+ # Construct URL
+ url = self.get_properties.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.head(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties'))
+ response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ get_properties.metadata = {'url': '/{filesystem}'} # type: ignore
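For orientation, a short sketch of reading filesystem properties through the public filedatalake client; the connection string and names are placeholders:
```python
from azure.storage.filedatalake import DataLakeServiceClient

service = DataLakeServiceClient.from_connection_string("<connection-string>")
fs_client = service.get_file_system_client(file_system="myfilesystem")

# System and user-defined properties come back together, as described above.
props = fs_client.get_file_system_properties()
print(props.name, props.last_modified, props.metadata)
```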
+
+ def delete(
+ self,
+ request_id_parameter=None, # type: Optional[str]
+ timeout=None, # type: Optional[int]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Delete FileSystem.
+
+ Marks the FileSystem for deletion. When a FileSystem is deleted, a FileSystem with the same
+ identifier cannot be created for at least 30 seconds. While the filesystem is being deleted,
+ attempts to create a filesystem with the same identifier will fail with status code 409
+ (Conflict), with the service returning additional error information indicating that the
+ filesystem is being deleted. All other operations, including operations on any files or
+ directories within the filesystem, will fail with status code 404 (Not Found) while the
+ filesystem is being deleted. This operation supports conditional HTTP requests. For more
+ information, see `Specifying Conditional Headers for Blob Service Operations
+ `_.
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if modified_access_conditions is not None:
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ accept = "application/json"
+
+ # Construct URL
+ url = self.delete.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.delete(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ delete.metadata = {'url': '/{filesystem}'} # type: ignore
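A hedged sketch of a conditional delete at the public-client level, mirroring the If-Unmodified-Since handling above; the `if_unmodified_since` keyword is assumed to be forwarded the way other Azure Storage data-plane clients forward it, and the names are placeholders:
```python
import datetime

from azure.storage.filedatalake import DataLakeServiceClient

service = DataLakeServiceClient.from_connection_string("<connection-string>")
fs_client = service.get_file_system_client(file_system="myfilesystem")

# Delete only if the filesystem has not been modified in the last hour;
# otherwise the service refuses the request and the client raises an error.
cutoff = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
fs_client.delete_file_system(if_unmodified_since=cutoff)
```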
+
+ def list_paths(
+ self,
+ recursive, # type: bool
+ request_id_parameter=None, # type: Optional[str]
+ timeout=None, # type: Optional[int]
+ continuation=None, # type: Optional[str]
+ path=None, # type: Optional[str]
+ max_results=None, # type: Optional[int]
+ upn=None, # type: Optional[bool]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> Iterable["_models.PathList"]
+ """List Paths.
+
+ List FileSystem paths and their properties.
+
+ :param recursive: Required.
+ :type recursive: bool
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+    :param continuation: Optional. When the number of paths to return exceeds the limit for a
+     single response, a continuation token is returned in the x-ms-continuation response header.
+     When a continuation token is returned in the response, it must be specified in a subsequent
+     invocation of the list operation to continue listing the paths.
+ :type continuation: str
+ :param path: Optional. Filters results to paths within the specified directory. An error
+ occurs if the directory does not exist.
+ :type path: str
+ :param max_results: An optional value that specifies the maximum number of items to return. If
+ omitted or greater than 5,000, the response will include up to 5,000 items.
+ :type max_results: int
+ :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+ "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+ headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If
+ "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+ false. Note that group and application Object IDs are not translated because they do not have
+ unique friendly names.
+ :type upn: bool
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either PathList or the result of cls(response)
+ :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.models.PathList]
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["_models.PathList"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ accept = "application/json"
+
+ # TODO: change this once continuation/next_link autorest PR is merged
+ def prepare_request(next_link=None, cont_token=None):
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter",
+ request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version,
+ 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ if not next_link:
+ # Construct URL
+ url = self.list_paths.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource,
+ 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ # TODO: change this once continuation/next_link autorest PR is merged
+ if cont_token is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", cont_token, 'str')
+ if path is not None:
+ query_parameters['directory'] = self._serialize.query("path", path, 'str')
+ query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool')
+ if max_results is not None:
+ query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
+ if upn is not None:
+ query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
+
+ request = self._client.get(url, query_parameters, header_parameters)
+ else:
+ url = next_link
+ query_parameters = {} # type: Dict[str, Any]
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+ request = self._client.get(url, query_parameters, header_parameters)
+ return request
+
+ def extract_data(pipeline_response):
+ # TODO: change this once continuation/next_link autorest PR is merged
+ try:
+ cont_token = pipeline_response.http_response.headers['x-ms-continuation']
+ except KeyError:
+ cont_token = None
+ deserialized = self._deserialize('PathList', pipeline_response)
+ list_of_elem = deserialized.paths
+ if cls:
+ list_of_elem = cls(list_of_elem)
+ # TODO: change this once continuation/next_link autorest PR is merged
+ return cont_token, iter(list_of_elem)
+
+ # TODO: change this once continuation/next_link autorest PR is merged
+ def get_next(cont_token=None):
+            # Use the caller-supplied continuation only for the first request;
+            # later pages must use the token returned by the previous response.
+            cont_token = cont_token if cont_token is not None else continuation
+ request = prepare_request(cont_token=cont_token)
+
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response, model=error)
+
+ return pipeline_response
+
+ return ItemPaged(
+ get_next, extract_data
+ )
+
+ list_paths.metadata = {'url': '/{filesystem}'} # type: ignore
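The paging machinery above (prepare_request / extract_data / get_next) is what the public `get_paths` iterator drives; a minimal sketch with placeholder names:
```python
from azure.storage.filedatalake import DataLakeServiceClient

service = DataLakeServiceClient.from_connection_string("<connection-string>")
fs_client = service.get_file_system_client(file_system="myfilesystem")

# The iterator follows x-ms-continuation tokens across pages transparently.
for path in fs_client.get_paths(path=None, recursive=True):
    kind = "dir" if path.is_directory else "file"
    print(kind, path.name)
```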
+
+ def list_blob_hierarchy_segment(
+ self,
+ prefix=None, # type: Optional[str]
+ delimiter=None, # type: Optional[str]
+ marker=None, # type: Optional[str]
+ max_results=None, # type: Optional[int]
+ include=None, # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]]
+ showonly="deleted", # type: Optional[str]
+ timeout=None, # type: Optional[int]
+ request_id_parameter=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> "_models.ListBlobsHierarchySegmentResponse"
+ """The List Blobs operation returns a list of the blobs under the specified container.
+
+    :param prefix: Filters results to blobs whose names begin with the specified prefix.
+ :type prefix: str
+ :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix
+ element in the response body that acts as a placeholder for all blobs whose names begin with
+ the same substring up to the appearance of the delimiter character. The delimiter may be a
+ single character or a string.
+ :type delimiter: str
+    :param marker: A string value that identifies the portion of the list of blobs to be
+     returned with the next listing operation. The operation returns the NextMarker value within
+     the response body if the listing operation did not return all blobs remaining to be listed
+     with the current page. The NextMarker value can be used as the value for the marker
+     parameter in a subsequent call to request the next page of list items. The marker value is
+     opaque to the client.
+ :type marker: str
+ :param max_results: An optional value that specifies the maximum number of items to return. If
+ omitted or greater than 5,000, the response will include up to 5,000 items.
+ :type max_results: int
+ :param include: Include this parameter to specify one or more datasets to include in the
+ response.
+ :type include: list[str or ~azure.storage.filedatalake.models.ListBlobsIncludeItem]
+    :param showonly: Include this parameter to restrict the listing to a single dataset; when
+     set to "deleted", only soft-deleted blobs are returned.
+ :type showonly: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: ListBlobsHierarchySegmentResponse, or the result of cls(response)
+ :rtype: ~azure.storage.filedatalake.models.ListBlobsHierarchySegmentResponse
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ restype = "container"
+ comp = "list"
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if prefix is not None:
+ query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+ if delimiter is not None:
+ query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str')
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if max_results is not None:
+ query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
+ if include is not None:
+ query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',')
+ if showonly is not None:
+ query_parameters['showonly'] = self._serialize.query("showonly", showonly, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+ list_blob_hierarchy_segment.metadata = {'url': '/{filesystem}'} # type: ignore
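Since `showonly` defaults to "deleted" here, this operation is the low-level hook for listing soft-deleted paths. A sketch of calling it directly, assuming `fs_ops` is an already-constructed instance of this FileSystemOperations class obtained from the generated client (that name is assumed, not part of this change):
```python
# `fs_ops` is assumed to be a FileSystemOperations instance wired up by the
# generated client; only parameters defined on the method above are used.
segment = fs_ops.list_blob_hierarchy_segment(
    delimiter="/",
    showonly="deleted",   # list only soft-deleted blobs
    max_results=100,
)
# `segment` is a ListBlobsHierarchySegmentResponse model; pass the returned
# marker back via `marker=` to fetch subsequent pages.
```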
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/operations/_path_operations.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/operations/_path_operations.py
new file mode 100644
index 00000000000..5517c96888b
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/operations/_path_operations.py
@@ -0,0 +1,1789 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import datetime
+from typing import TYPE_CHECKING
+import warnings
+
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpRequest, HttpResponse
+
+from .. import models as _models
+
+if TYPE_CHECKING:
+ # pylint: disable=unused-import,ungrouped-imports
+ from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union
+
+ T = TypeVar('T')
+ ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+class PathOperations(object):
+ """PathOperations operations.
+
+ You should not instantiate this class directly. Instead, you should create a Client instance that
+ instantiates it for you and attaches it as an attribute.
+
+ :ivar models: Alias to model classes used in this operation group.
+ :type models: ~azure.storage.filedatalake.models
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ """
+
+ models = _models
+
+ def __init__(self, client, config, serializer, deserializer):
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+ self._config = config
+
+ def create(
+ self,
+ request_id_parameter=None, # type: Optional[str]
+ timeout=None, # type: Optional[int]
+ resource=None, # type: Optional[Union[str, "_models.PathResourceType"]]
+ continuation=None, # type: Optional[str]
+ mode=None, # type: Optional[Union[str, "_models.PathRenameMode"]]
+ rename_source=None, # type: Optional[str]
+ source_lease_id=None, # type: Optional[str]
+ properties=None, # type: Optional[str]
+ permissions=None, # type: Optional[str]
+ umask=None, # type: Optional[str]
+ path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Create File | Create Directory | Rename File | Rename Directory.
+
+ Create or rename a file or directory. By default, the destination is overwritten and if the
+ destination already exists and has a lease the lease is broken. This operation supports
+ conditional HTTP requests. For more information, see `Specifying Conditional Headers for Blob
+ Service Operations `_. To fail if the destination already exists,
+ use a conditional request with If-None-Match: "*".
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param resource: Required only for Create File and Create Directory. The value must be "file"
+ or "directory".
+ :type resource: str or ~azure.storage.filedatalake.models.PathResourceType
+    :param continuation: Optional. When renaming a directory, the number of paths that are
+     renamed with each invocation is limited. If the number of paths to be renamed exceeds this
+     limit, a continuation token is returned in this response header. When a continuation token
+     is returned in the response, it must be specified in a subsequent invocation of the rename
+     operation to continue renaming the directory.
+ :type continuation: str
+ :param mode: Optional. Valid only when namespace is enabled. This parameter determines the
+ behavior of the rename operation. The value must be "legacy" or "posix", and the default value
+ will be "posix".
+ :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode
+ :param rename_source: An optional file or directory to be renamed. The value must have the
+ following format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties
+ will overwrite the existing properties; otherwise, the existing properties will be preserved.
+ This value must be a URL percent-encoded string. Note that the string may only contain ASCII
+ characters in the ISO-8859-1 character set.
+ :type rename_source: str
+ :param source_lease_id: A lease ID for the source path. If specified, the source path must have
+ an active lease and the lease ID must match.
+ :type source_lease_id: str
+ :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+ format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+ is a base64 encoded string. Note that the string may only contain ASCII characters in the
+ ISO-8859-1 character set. If the filesystem exists, any properties not included in the list
+ will be removed. All properties are removed if the header is omitted. To merge new and
+ existing properties, first get all existing properties and the current E-Tag, then make a
+ conditional request with the E-Tag and include values for all properties.
+ :type properties: str
+ :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+ account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+ Each class may be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+ :type permissions: str
+ :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL, the umask
+ restricts the permissions of the file or directory to be created. The resulting permission is
+ given by p bitwise and not u, where p is the permission and u is the umask. For example, if p
+ is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777
+ for a directory and 0666 for a file. The default umask is 0027. The umask must be specified
+ in 4-digit octal notation (e.g. 0766).
+ :type umask: str
+ :param path_http_headers: Parameter group.
+ :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :param source_modified_access_conditions: Parameter group.
+ :type source_modified_access_conditions: ~azure.storage.filedatalake.models.SourceModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _cache_control = None
+ _content_encoding = None
+ _content_language = None
+ _content_disposition = None
+ _content_type = None
+ _lease_id = None
+ _if_match = None
+ _if_none_match = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ _source_if_match = None
+ _source_if_none_match = None
+ _source_if_modified_since = None
+ _source_if_unmodified_since = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if path_http_headers is not None:
+ _cache_control = path_http_headers.cache_control
+ _content_encoding = path_http_headers.content_encoding
+ _content_language = path_http_headers.content_language
+ _content_disposition = path_http_headers.content_disposition
+ _content_type = path_http_headers.content_type
+ if source_modified_access_conditions is not None:
+ _source_if_match = source_modified_access_conditions.source_if_match
+ _source_if_none_match = source_modified_access_conditions.source_if_none_match
+ _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ accept = "application/json"
+
+ # Construct URL
+ url = self.create.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if resource is not None:
+ query_parameters['resource'] = self._serialize.query("resource", resource, 'str')
+ if continuation is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+ if mode is not None:
+ query_parameters['mode'] = self._serialize.query("mode", mode, 'str')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if _cache_control is not None:
+ header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str')
+ if _content_encoding is not None:
+ header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str')
+ if _content_language is not None:
+ header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str')
+ if _content_disposition is not None:
+ header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str')
+ if _content_type is not None:
+ header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str')
+ if rename_source is not None:
+ header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if source_lease_id is not None:
+ header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str')
+ if properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
+ if permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str')
+ if umask is not None:
+ header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str')
+ if _if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
+ if _if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ if _source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str')
+ if _source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str')
+ if _source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123')
+ if _source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation'))
+ response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
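A short sketch of the create and rename flows this operation serves, expressed through the public client; names are placeholders and the target filesystem is assumed to exist:
```python
from azure.storage.filedatalake import DataLakeServiceClient

service = DataLakeServiceClient.from_connection_string("<connection-string>")
fs_client = service.get_file_system_client(file_system="myfilesystem")

# Create a directory and a file beneath it.
fs_client.create_directory("logs")
file_client = fs_client.create_file("logs/app.log")

# Rename reuses the same PUT with x-ms-rename-source; the new name is given
# as "<filesystem>/<path>".
file_client.rename_file("myfilesystem/logs/app-old.log")
```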
+
+ def update(
+ self,
+ action, # type: Union[str, "_models.PathUpdateAction"]
+ mode, # type: Union[str, "_models.PathSetAccessControlRecursiveMode"]
+ body, # type: IO
+ request_id_parameter=None, # type: Optional[str]
+ timeout=None, # type: Optional[int]
+ max_records=None, # type: Optional[int]
+ continuation=None, # type: Optional[str]
+ force_flag=None, # type: Optional[bool]
+ position=None, # type: Optional[int]
+ retain_uncommitted_data=None, # type: Optional[bool]
+ close=None, # type: Optional[bool]
+ content_length=None, # type: Optional[int]
+ properties=None, # type: Optional[str]
+ owner=None, # type: Optional[str]
+ group=None, # type: Optional[str]
+ permissions=None, # type: Optional[str]
+ acl=None, # type: Optional[str]
+ path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> Optional["_models.SetAccessControlRecursiveResponse"]
+ """Append Data | Flush Data | Set Properties | Set Access Control.
+
+ Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file,
+ sets properties for a file or directory, or sets access control for a file or directory. Data
+ can only be appended to a file. Concurrent writes to the same file using multiple clients are
+ not supported. This operation supports conditional HTTP requests. For more information, see
+ `Specifying Conditional Headers for Blob Service Operations `_.
+
+ :param action: The action must be "append" to upload data to be appended to a file, "flush" to
+ flush previously uploaded data to a file, "setProperties" to set the properties of a file or
+ directory, "setAccessControl" to set the owner, group, permissions, or access control list for
+ a file or directory, or "setAccessControlRecursive" to set the access control list for a
+ directory recursively. Note that Hierarchical Namespace must be enabled for the account in
+ order to use access control. Also note that the Access Control List (ACL) includes permissions
+ for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers
+ are mutually exclusive.
+ :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction
+ :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify"
+ modifies one or more POSIX access control rights that pre-exist on files and directories,
+ "remove" removes one or more POSIX access control rights that were present earlier on files
+ and directories.
+ :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+ :param body: Initial data.
+ :type body: IO
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param max_records: Optional. Valid for "SetAccessControlRecursive" operation. It specifies the
+ maximum number of files or directories on which the acl change will be applied. If omitted or
+ greater than 2,000, the request will process up to 2,000 items.
+ :type max_records: int
+ :param continuation: Optional. The number of paths processed with each invocation is limited.
+ If the number of paths to be processed exceeds this limit, a continuation token is returned in
+ the response header x-ms-continuation. When a continuation token is returned in the response,
+     it must be percent-encoded and specified in a subsequent invocation of the
+     setAccessControlRecursive operation.
+ :type continuation: str
+ :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false,
+ the operation will terminate quickly on encountering user errors (4XX). If true, the operation
+ will ignore user errors and proceed with the operation on other sub-entities of the directory.
+     A continuation token will only be returned when forceFlag is true in case of user errors.
+     If not set, the default value is false.
+ :type force_flag: bool
+ :param position: This parameter allows the caller to upload data in parallel and control the
+ order in which it is appended to the file. It is required when uploading data to be appended
+ to the file and when flushing previously uploaded data to the file. The value must be the
+ position where the data is to be appended. Uploaded data is not immediately flushed, or
+ written, to the file. To flush, the previously uploaded data must be contiguous, the position
+ parameter must be specified and equal to the length of the file after all data has been
+ written, and there must not be a request entity body included with the request.
+ :type position: long
+ :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data
+ is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+ after the flush operation. The default is false. Data at offsets less than the specified
+ position are written to the file when flush succeeds, but this optional parameter allows data
+ after the flush position to be retained for a future flush operation.
+ :type retain_uncommitted_data: bool
+ :param close: Azure Storage Events allow applications to receive notifications when files
+ change. When Azure Storage Events are enabled, a file changed event is raised. This event has a
+ property indicating whether this is the final change to distinguish the difference between an
+ intermediate flush to a file stream and the final close of a file stream. The close query
+ parameter is valid only when the action is "flush" and change notifications are enabled. If the
+ value of close is "true" and the flush operation completes successfully, the service raises a
+ file change notification with a property indicating that this is the final update (the file
+     stream has been closed). If "false", a change notification is raised indicating the file has
+     changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver
+     to indicate that the file stream has been closed.
+ :type close: bool
+ :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush
+ Data". Must be the length of the request content in bytes for "Append Data".
+ :type content_length: long
+ :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+ format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+ is a base64 encoded string. Note that the string may only contain ASCII characters in the
+ ISO-8859-1 character set. If the filesystem exists, any properties not included in the list
+ will be removed. All properties are removed if the header is omitted. To merge new and
+ existing properties, first get all existing properties and the current E-Tag, then make a
+ conditional request with the E-Tag and include values for all properties.
+ :type properties: str
+ :param owner: Optional. The owner of the blob or directory.
+ :type owner: str
+ :param group: Optional. The owning group of the blob or directory.
+ :type group: str
+ :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+ account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+ Each class may be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+ :type permissions: str
+ :param acl: Sets POSIX access control rights on files and directories. The value is a comma-
+ separated list of access control entries. Each access control entry (ACE) consists of a scope,
+ a type, a user or group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :param path_http_headers: Parameter group.
+ :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: SetAccessControlRecursiveResponse, or the result of cls(response)
+ :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SetAccessControlRecursiveResponse"]]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _content_md5 = None
+ _lease_id = None
+ _cache_control = None
+ _content_type = None
+ _content_disposition = None
+ _content_encoding = None
+ _content_language = None
+ _if_match = None
+ _if_none_match = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if path_http_headers is not None:
+ _content_md5 = path_http_headers.content_md5
+ _cache_control = path_http_headers.cache_control
+ _content_type = path_http_headers.content_type
+ _content_disposition = path_http_headers.content_disposition
+ _content_encoding = path_http_headers.content_encoding
+ _content_language = path_http_headers.content_language
+ content_type = kwargs.pop("content_type", "application/octet-stream")
+ accept = "application/json"
+
+ # Construct URL
+ url = self.update.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+ if max_records is not None:
+ query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1)
+ if continuation is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+ query_parameters['mode'] = self._serialize.query("mode", mode, 'str')
+ if force_flag is not None:
+ query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool')
+ if position is not None:
+ query_parameters['position'] = self._serialize.query("position", position, 'long')
+ if retain_uncommitted_data is not None:
+ query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool')
+ if close is not None:
+ query_parameters['close'] = self._serialize.query("close", close, 'bool')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if content_length is not None:
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0)
+ if _content_md5 is not None:
+ header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if _cache_control is not None:
+ header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str')
+ if _content_type is not None:
+ header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str')
+ if _content_disposition is not None:
+ header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str')
+ if _content_encoding is not None:
+ header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str')
+ if _content_language is not None:
+ header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str')
+ if properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str')
+ if owner is not None:
+ header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
+ if group is not None:
+ header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
+ if permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str')
+ if acl is not None:
+ header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str')
+ if _if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
+ if _if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ body_content_kwargs = {} # type: Dict[str, Any]
+ body_content_kwargs['stream_content'] = body
+ request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ deserialized = None
+ if response.status_code == 200:
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges'))
+ response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control'))
+ response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition'))
+ response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding'))
+ response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language'))
+ response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
+ response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range'))
+ response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
+ response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5'))
+ response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties'))
+ response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response)
+
+ if response.status_code == 202:
+ response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+ update.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
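The append/flush split described in the docstring is easiest to see at the public-client level; a minimal sketch with placeholder names, where flush passes the total length written as the position:
```python
from azure.storage.filedatalake import DataLakeServiceClient

service = DataLakeServiceClient.from_connection_string("<connection-string>")
fs_client = service.get_file_system_client(file_system="myfilesystem")
file_client = fs_client.create_file("data/sample.txt")

data = b"hello, adls gen2"
# action=append uploads the bytes at the given offset; nothing is readable
# until action=flush commits up to the final position.
file_client.append_data(data, offset=0, length=len(data))
file_client.flush_data(len(data))
```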
+
+ def lease(
+ self,
+ x_ms_lease_action, # type: Union[str, "_models.PathLeaseAction"]
+ request_id_parameter=None, # type: Optional[str]
+ timeout=None, # type: Optional[int]
+ x_ms_lease_duration=None, # type: Optional[int]
+ x_ms_lease_break_period=None, # type: Optional[int]
+ proposed_lease_id=None, # type: Optional[str]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Lease Path.
+
+ Create and manage a lease to restrict write and delete access to the path. This operation
+ supports conditional HTTP requests. For more information, see `Specifying Conditional Headers
+ for Blob Service Operations `_.
+
+ :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew",
+ and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration"
+ to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the
+ lease break period is allowed to elapse, during which time no lease operation except break and
+ release can be performed on the file. When a lease is successfully broken, the response
+ indicates the interval in seconds until a new lease can be acquired. Use "change" and specify
+ the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to
+ change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an
+ existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease.
+ :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies
+ the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or
+ -1 for infinite lease.
+ :type x_ms_lease_duration: int
+ :param x_ms_lease_break_period: The lease break period duration is optional to break a lease,
+ and specifies the break period of the lease in seconds. The lease break duration must be
+ between 0 and 60 seconds.
+ :type x_ms_lease_break_period: int
+ :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns
+ 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
+ Constructor (String) for a list of valid GUID string formats.
+ :type proposed_lease_id: str
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _lease_id = None
+ _if_match = None
+ _if_none_match = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ accept = "application/json"
+
+ # Construct URL
+ url = self.lease.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'str')
+ if x_ms_lease_duration is not None:
+ header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", x_ms_lease_duration, 'int')
+ if x_ms_lease_break_period is not None:
+ header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if proposed_lease_id is not None:
+ header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
+ if _if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
+ if _if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.post(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id'))
+
+ if response.status_code == 201:
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id'))
+
+ if response.status_code == 202:
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-lease-time']=self._deserialize('str', response.headers.get('x-ms-lease-time'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ lease.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
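+ # Illustrative sketch (not generated code) of the wire-level request lease() assembles above;
+ # account, filesystem, path and the lease GUID are example values only:
+ #   POST https://myaccount.dfs.core.windows.net/myfilesystem/dir/file.txt?timeout=30
+ #   x-ms-lease-action: acquire
+ #   x-ms-lease-duration: 15
+ #   x-ms-proposed-lease-id: 7f9a2b44-0000-0000-0000-000000000000
+ # Acquiring a new lease typically answers 201, renew/change/release answer 200, and break
+ # answers 202 with the remaining time in the x-ms-lease-time header.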
+
+ def read(
+ self,
+ request_id_parameter=None, # type: Optional[str]
+ timeout=None, # type: Optional[int]
+ range=None, # type: Optional[str]
+ x_ms_range_get_content_md5=None, # type: Optional[bool]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> IO
+ """Read File.
+
+ Read the contents of a file. For read operations, range requests are supported. This operation
+ supports conditional HTTP requests. For more information, see `Specifying Conditional Headers
+ for Blob Service Operations `_.
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param range: The HTTP Range request header specifies one or more byte ranges of the resource
+ to be retrieved.
+ :type range: str
+ :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified
+ together with the Range header, the service returns the MD5 hash for the range, as long as the
+ range is less than or equal to 4MB in size. If this header is specified without the Range
+ header, the service returns status code 400 (Bad Request). If this header is set to true when
+ the range exceeds 4 MB in size, the service returns status code 400 (Bad Request).
+ :type x_ms_range_get_content_md5: bool
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: IO, or the result of cls(response)
+ :rtype: IO
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[IO]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _lease_id = None
+ _if_match = None
+ _if_none_match = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ accept = "application/json"
+
+ # Construct URL
+ url = self.read.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if range is not None:
+ header_parameters['Range'] = self._serialize.header("range", range, 'str')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if x_ms_range_get_content_md5 is not None:
+ header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool')
+ if _if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
+ if _if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 206]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ if response.status_code == 200:
+ response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges'))
+ response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control'))
+ response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition'))
+ response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding'))
+ response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language'))
+ response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
+ response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range'))
+ response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
+ response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type'))
+ response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties'))
+ response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration'))
+ response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state'))
+ response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status'))
+ deserialized = response.stream_download(self._client._pipeline)
+
+ if response.status_code == 206:
+ response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges'))
+ response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control'))
+ response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition'))
+ response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding'))
+ response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language'))
+ response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
+ response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range'))
+ response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
+ response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5'))
+ response_headers['x-ms-content-md5']=self._deserialize('str', response.headers.get('x-ms-content-md5'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type'))
+ response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties'))
+ response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration'))
+ response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state'))
+ response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status'))
+ deserialized = response.stream_download(self._client._pipeline)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+ read.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
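+ # Illustrative request shape for read() above (example account/path values, not emitted
+ # verbatim by this client):
+ #   GET https://myaccount.dfs.core.windows.net/myfilesystem/dir/file.txt?timeout=30
+ #   Range: bytes=0-1023
+ #   x-ms-range-get-content-md5: true
+ # A full read answers 200 and a range read answers 206; in both cases the body is streamed
+ # through response.stream_download(...) rather than buffered in memory.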
+
+ def get_properties(
+ self,
+ request_id_parameter=None, # type: Optional[str]
+ timeout=None, # type: Optional[int]
+ action=None, # type: Optional[Union[str, "_models.PathGetPropertiesAction"]]
+ upn=None, # type: Optional[bool]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Get Properties | Get Status | Get Access Control List.
+
+ Get Properties returns all system and user defined properties for a path. Get Status returns
+ all system defined properties for a path. Get Access Control List returns the access control
+ list for a path. This operation supports conditional HTTP requests. For more information, see
+ `Specifying Conditional Headers for Blob Service Operations `_.
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param action: Optional. If the value is "getStatus", only the system-defined properties for
+ the path are returned. If the value is "getAccessControl", the access control list is returned
+ in the response headers (Hierarchical Namespace must be enabled for the account); otherwise,
+ the properties are returned.
+ :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction
+ :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+ "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+ headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If
+ "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+ false. Note that group and application Object IDs are not translated because they do not have
+ unique friendly names.
+ :type upn: bool
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _lease_id = None
+ _if_match = None
+ _if_none_match = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ accept = "application/json"
+
+ # Construct URL
+ url = self.get_properties.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if action is not None:
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+ if upn is not None:
+ query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if _if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
+ if _if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.head(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges'))
+ response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control'))
+ response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition'))
+ response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding'))
+ response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language'))
+ response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
+ response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range'))
+ response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
+ response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type'))
+ response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties'))
+ response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner'))
+ response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group'))
+ response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions'))
+ response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl'))
+ response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration'))
+ response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state'))
+ response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ get_properties.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
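+ # Illustrative request shape for get_properties() above (example values only):
+ #   HEAD https://myaccount.dfs.core.windows.net/myfilesystem/dir/file.txt?action=getAccessControl&upn=true
+ # Because this is a HEAD request there is no response body; everything (Content-Length, ETag,
+ # x-ms-owner, x-ms-group, x-ms-acl, lease state, ...) comes back as response headers, which is
+ # why the method only populates response_headers and returns None.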
+
+ def delete(
+ self,
+ request_id_parameter=None, # type: Optional[str]
+ timeout=None, # type: Optional[int]
+ recursive=None, # type: Optional[bool]
+ continuation=None, # type: Optional[str]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Delete File | Delete Directory.
+
+ Delete the file or directory. This operation supports conditional HTTP requests. For more
+ information, see `Specifying Conditional Headers for Blob Service Operations
+ `_.
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param recursive: Required and valid only when the resource is a directory. If "true", all
+ paths beneath the directory will be deleted.
+ :type recursive: bool
+ :param continuation: Optional. When deleting a directory, the number of paths that are deleted
+ with each invocation is limited. If the number of paths to be deleted exceeds this limit, a
+ continuation token is returned in this response header. When a continuation token is returned
+ in the response, it must be specified in a subsequent invocation of the delete operation to
+ continue deleting the directory.
+ :type continuation: str
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _lease_id = None
+ _if_match = None
+ _if_none_match = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ accept = "application/json"
+
+ # Construct URL
+ url = self.delete.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if recursive is not None:
+ query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool')
+ if continuation is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if _if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
+ if _if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.delete(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation'))
+ response_headers['x-ms-deletion-id']=self._deserialize('str', response.headers.get('x-ms-deletion-id'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
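+ # Illustrative request shape for delete() above (example values only):
+ #   DELETE https://myaccount.dfs.core.windows.net/myfilesystem/mydir?recursive=true&timeout=30
+ # Large directory deletes may return an x-ms-continuation header; callers pass that token back
+ # via the continuation parameter until it is no longer returned. On accounts with soft delete
+ # enabled, the x-ms-deletion-id header (when present) identifies the deleted path for a later
+ # undelete.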
+
+ def set_access_control(
+ self,
+ timeout=None, # type: Optional[int]
+ owner=None, # type: Optional[str]
+ group=None, # type: Optional[str]
+ permissions=None, # type: Optional[str]
+ acl=None, # type: Optional[str]
+ request_id_parameter=None, # type: Optional[str]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Set the owner, group, permissions, or access control list for a path.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param owner: Optional. The owner of the blob or directory.
+ :type owner: str
+ :param group: Optional. The owning group of the blob or directory.
+ :type group: str
+ :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+ account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+ Each class may be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+ :type permissions: str
+ :param acl: Sets POSIX access control rights on files and directories. The value is a comma-
+ separated list of access control entries. Each access control entry (ACE) consists of a scope,
+ a type, a user or group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _lease_id = None
+ _if_match = None
+ _if_none_match = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ action = "setAccessControl"
+ accept = "application/json"
+
+ # Construct URL
+ url = self.set_access_control.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if owner is not None:
+ header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
+ if group is not None:
+ header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
+ if permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str')
+ if acl is not None:
+ header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str')
+ if _if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
+ if _if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.patch(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
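+ # Illustrative request shape for set_access_control() above (example values only):
+ #   PATCH https://myaccount.dfs.core.windows.net/myfilesystem/dir/file.txt?action=setAccessControl
+ #   x-ms-owner: 71d0aaee-0000-0000-0000-000000000000
+ #   x-ms-acl: user::rwx,group::r-x,other::---
+ # Owner, group, POSIX permissions and the ACL are all optional headers; only the ones supplied
+ # to the call are sent.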
+
+ def set_access_control_recursive(
+ self,
+ mode, # type: Union[str, "_models.PathSetAccessControlRecursiveMode"]
+ timeout=None, # type: Optional[int]
+ continuation=None, # type: Optional[str]
+ force_flag=None, # type: Optional[bool]
+ max_records=None, # type: Optional[int]
+ acl=None, # type: Optional[str]
+ request_id_parameter=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> "_models.SetAccessControlRecursiveResponse"
+ """Set the access control list for a path and subpaths.
+
+ :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify"
+ modifies one or more POSIX access control rights that pre-exist on files and directories,
+ "remove" removes one or more POSIX access control rights that were present earlier on files
+ and directories.
+ :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param continuation: Optional. The number of paths processed with each invocation is limited.
+ If the number of paths to be processed exceeds this limit, a continuation token is returned in
+ the x-ms-continuation response header. When a continuation token is returned in the response,
+ it must be specified in a subsequent invocation of the operation to continue processing the
+ remaining paths.
+ :type continuation: str
+ :param force_flag: Optional. Valid for the "setAccessControlRecursive" operation. If set to
+ false, the operation terminates quickly on encountering user errors (4XX). If true, the
+ operation ignores user errors and proceeds with the operation on the other sub-entities of the
+ directory. A continuation token is only returned when forceFlag is true and user errors were
+ encountered. If not set, the default value is false.
+ :type force_flag: bool
+ :param max_records: Optional. It specifies the maximum number of files or directories on which
+ the acl change will be applied. If omitted or greater than 2,000, the request will process up
+ to 2,000 items.
+ :type max_records: int
+ :param acl: Sets POSIX access control rights on files and directories. The value is a comma-
+ separated list of access control entries. Each access control entry (ACE) consists of a scope,
+ a type, a user or group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: SetAccessControlRecursiveResponse, or the result of cls(response)
+ :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["_models.SetAccessControlRecursiveResponse"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ action = "setAccessControlRecursive"
+ accept = "application/json"
+
+ # Construct URL
+ url = self.set_access_control_recursive.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if continuation is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+ query_parameters['mode'] = self._serialize.query("mode", mode, 'str')
+ if force_flag is not None:
+ query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool')
+ if max_records is not None:
+ query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if acl is not None:
+ header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.patch(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+ set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
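+ # Illustrative request shape for set_access_control_recursive() above (example values only):
+ #   PATCH https://myaccount.dfs.core.windows.net/myfilesystem/mydir?action=setAccessControlRecursive&mode=modify&maxRecords=1000
+ #   x-ms-acl: user:71d0aaee-0000-0000-0000-000000000000:rwx
+ # Unlike most operations in this class, the service returns a JSON body that is deserialized
+ # into SetAccessControlRecursiveResponse; progress on large trees continues by re-issuing the
+ # call with the x-ms-continuation token.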
+
+ def flush_data(
+ self,
+ timeout=None, # type: Optional[int]
+ position=None, # type: Optional[int]
+ retain_uncommitted_data=None, # type: Optional[bool]
+ close=None, # type: Optional[bool]
+ content_length=None, # type: Optional[int]
+ request_id_parameter=None, # type: Optional[str]
+ path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Set the owner, group, permissions, or access control list for a path.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param position: This parameter allows the caller to upload data in parallel and control the
+ order in which it is appended to the file. It is required when uploading data to be appended
+ to the file and when flushing previously uploaded data to the file. The value must be the
+ position where the data is to be appended. Uploaded data is not immediately flushed, or
+ written, to the file. To flush, the previously uploaded data must be contiguous, the position
+ parameter must be specified and equal to the length of the file after all data has been
+ written, and there must not be a request entity body included with the request.
+ :type position: long
+ :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data
+ is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+ after the flush operation. The default is false. Data at offsets less than the specified
+ position are written to the file when flush succeeds, but this optional parameter allows data
+ after the flush position to be retained for a future flush operation.
+ :type retain_uncommitted_data: bool
+ :param close: Azure Storage Events allow applications to receive notifications when files
+ change. When Azure Storage Events are enabled, a file changed event is raised. This event has a
+ property indicating whether this is the final change to distinguish the difference between an
+ intermediate flush to a file stream and the final close of a file stream. The close query
+ parameter is valid only when the action is "flush" and change notifications are enabled. If the
+ value of close is "true" and the flush operation completes successfully, the service raises a
+ file change notification with a property indicating that this is the final update (the file
+ stream has been closed). If "false" a change notification is raised indicating the file has
+ changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to
+ indicate that the file stream has been closed.
+ :type close: bool
+ :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush
+ Data". Must be the length of the request content in bytes for "Append Data".
+ :type content_length: long
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param path_http_headers: Parameter group.
+ :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _content_md5 = None
+ _lease_id = None
+ _cache_control = None
+ _content_type = None
+ _content_disposition = None
+ _content_encoding = None
+ _content_language = None
+ _if_match = None
+ _if_none_match = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if path_http_headers is not None:
+ _content_md5 = path_http_headers.content_md5
+ _cache_control = path_http_headers.cache_control
+ _content_type = path_http_headers.content_type
+ _content_disposition = path_http_headers.content_disposition
+ _content_encoding = path_http_headers.content_encoding
+ _content_language = path_http_headers.content_language
+ action = "flush"
+ accept = "application/json"
+
+ # Construct URL
+ url = self.flush_data.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if position is not None:
+ query_parameters['position'] = self._serialize.query("position", position, 'long')
+ if retain_uncommitted_data is not None:
+ query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool')
+ if close is not None:
+ query_parameters['close'] = self._serialize.query("close", close, 'bool')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if content_length is not None:
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0)
+ if _content_md5 is not None:
+ header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if _cache_control is not None:
+ header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str')
+ if _content_type is not None:
+ header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str')
+ if _content_disposition is not None:
+ header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str')
+ if _content_encoding is not None:
+ header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str')
+ if _content_language is not None:
+ header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str')
+ if _if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
+ if _if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+ if _if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
+ if _if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.patch(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ flush_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
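+ # Illustrative request shape for flush_data() above (example values only): after appending
+ # 4096 bytes, the flush that commits them looks roughly like
+ #   PATCH https://myaccount.dfs.core.windows.net/myfilesystem/dir/file.txt?action=flush&position=4096&close=true
+ #   Content-Length: 0
+ # position must equal the total length of the data appended so far, and the request carries no
+ # body.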
+
+ def append_data(
+ self,
+ body, # type: IO
+ position=None, # type: Optional[int]
+ timeout=None, # type: Optional[int]
+ content_length=None, # type: Optional[int]
+ transactional_content_crc64=None, # type: Optional[bytearray]
+ request_id_parameter=None, # type: Optional[str]
+ path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Append data to the file.
+
+ :param body: Initial data.
+ :type body: IO
+ :param position: This parameter allows the caller to upload data in parallel and control the
+ order in which it is appended to the file. It is required when uploading data to be appended
+ to the file and when flushing previously uploaded data to the file. The value must be the
+ position where the data is to be appended. Uploaded data is not immediately flushed, or
+ written, to the file. To flush, the previously uploaded data must be contiguous, the position
+ parameter must be specified and equal to the length of the file after all data has been
+ written, and there must not be a request entity body included with the request.
+ :type position: long
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush
+ Data". Must be the length of the request content in bytes for "Append Data".
+ :type content_length: long
+ :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+ validated by the service.
+ :type transactional_content_crc64: bytearray
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param path_http_headers: Parameter group.
+ :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _transactional_content_hash = None
+ _lease_id = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if path_http_headers is not None:
+ _transactional_content_hash = path_http_headers.transactional_content_hash
+ action = "append"
+ content_type = kwargs.pop("content_type", "application/json")
+ accept = "application/json"
+
+ # Construct URL
+ url = self.append_data.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+ if position is not None:
+ query_parameters['position'] = self._serialize.query("position", position, 'long')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if content_length is not None:
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0)
+ if _transactional_content_hash is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", _transactional_content_hash, 'bytearray')
+ if transactional_content_crc64 is not None:
+ header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ body_content_kwargs = {} # type: Dict[str, Any]
+ body_content_kwargs['stream_content'] = body
+ request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5'))
+ response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64'))
+ response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ append_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
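+ # Illustrative request shape for append_data() above (example values only): appending 4096
+ # bytes at the start of a new file looks roughly like
+ #   PATCH https://myaccount.dfs.core.windows.net/myfilesystem/dir/file.txt?action=append&position=0
+ #   Content-Length: 4096
+ #   <raw bytes as the request body>
+ # The service acknowledges with 202; the data is not readable until a later flush_data() call
+ # commits it.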
+
+ def set_expiry(
+ self,
+ expiry_options, # type: Union[str, "_models.PathExpiryOptions"]
+ timeout=None, # type: Optional[int]
+ request_id_parameter=None, # type: Optional[str]
+ expires_on=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Sets the time a blob will expire and be deleted.
+
+ :param expiry_options: Required. Indicates mode of the expiry time.
+ :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param expires_on: The time at which the blob is set to expire.
+ :type expires_on: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ comp = "expiry"
+ accept = "application/json"
+
+ # Construct URL
+ url = self.set_expiry.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str')
+ if expires_on is not None:
+ header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ set_expiry.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
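+
+    # Illustrative sketch (not part of the generated client): assuming the path operations
+    # group is exposed on the client as `client.path` (an assumption), a caller could set an
+    # expiry roughly as follows. The PathExpiryOptions values and the expires_on format are
+    # assumed to follow the Blob set-expiry REST API (relative options take milliseconds,
+    # Absolute takes an RFC 1123 time):
+    #
+    #     client.path.set_expiry("RelativeToNow", expires_on="86400000")             # 1 day
+    #     client.path.set_expiry("Absolute", expires_on="Tue, 01 Jun 2021 00:00:00 GMT")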
+
+ def undelete(
+ self,
+ timeout=None, # type: Optional[int]
+ undelete_source=None, # type: Optional[str]
+ request_id_parameter=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Undelete a path that was previously soft deleted.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param undelete_source: Only for hierarchical namespace enabled accounts. Optional. The path of
+ the soft deleted blob to undelete.
+ :type undelete_source: str
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ comp = "undelete"
+ accept = "application/json"
+
+ # Construct URL
+ url = self.undelete.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if undelete_source is not None:
+ header_parameters['x-ms-undelete-source'] = self._serialize.header("undelete_source", undelete_source, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ undelete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
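+
+    # Illustrative sketch (not part of the generated client): `undelete_source` becomes the
+    # x-ms-undelete-source header built above. Assuming the path operations group is exposed
+    # as `client.path` and that the header takes the deleted path name plus its deletion id
+    # (both assumptions; the README example uses "test" / 132549163), a call might look like:
+    #
+    #     client.path.undelete(undelete_source="test?deletionid=132549163")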
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/operations/_service_operations.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/operations/_service_operations.py
new file mode 100644
index 00000000000..2db3801abff
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/operations/_service_operations.py
@@ -0,0 +1,153 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import TYPE_CHECKING
+import warnings
+
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpRequest, HttpResponse
+
+from .. import models as _models
+
+if TYPE_CHECKING:
+ # pylint: disable=unused-import,ungrouped-imports
+ from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
+
+ T = TypeVar('T')
+ ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+class ServiceOperations(object):
+ """ServiceOperations operations.
+
+ You should not instantiate this class directly. Instead, you should create a Client instance that
+ instantiates it for you and attaches it as an attribute.
+
+ :ivar models: Alias to model classes used in this operation group.
+ :type models: ~azure.storage.filedatalake.models
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ """
+
+ models = _models
+
+ def __init__(self, client, config, serializer, deserializer):
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+ self._config = config
+
+ def list_file_systems(
+ self,
+ prefix=None, # type: Optional[str]
+ continuation=None, # type: Optional[str]
+ max_results=None, # type: Optional[int]
+ request_id_parameter=None, # type: Optional[str]
+ timeout=None, # type: Optional[int]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> Iterable["_models.FileSystemList"]
+ """List FileSystems.
+
+ List filesystems and their properties in given account.
+
+ :param prefix: Filters results to filesystems within the specified prefix.
+ :type prefix: str
+        :param continuation: Optional. The number of filesystems returned with each invocation is
+         limited. If the number of filesystems to be returned exceeds this limit, a continuation
+         token is returned in the response. When a continuation token is returned in the response,
+         it must be specified in a subsequent invocation of the list operation to continue listing
+         the filesystems.
+ :type continuation: str
+ :param max_results: An optional value that specifies the maximum number of items to return. If
+ omitted or greater than 5,000, the response will include up to 5,000 items.
+ :type max_results: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either FileSystemList or the result of cls(response)
+ :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.models.FileSystemList]
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["_models.FileSystemList"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ resource = "account"
+ accept = "application/json"
+
+ def prepare_request(next_link=None):
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ if not next_link:
+ # Construct URL
+ url = self.list_file_systems.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['resource'] = self._serialize.query("resource", resource, 'str')
+ if prefix is not None:
+ query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+ if continuation is not None:
+ query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str')
+ if max_results is not None:
+ query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ request = self._client.get(url, query_parameters, header_parameters)
+ else:
+ url = next_link
+ query_parameters = {} # type: Dict[str, Any]
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+ request = self._client.get(url, query_parameters, header_parameters)
+ return request
+
+ def extract_data(pipeline_response):
+ deserialized = self._deserialize('FileSystemList', pipeline_response)
+ list_of_elem = deserialized.filesystems
+ if cls:
+ list_of_elem = cls(list_of_elem)
+ return None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ request = prepare_request(next_link)
+
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response, model=error)
+
+ return pipeline_response
+
+ return ItemPaged(
+ get_next, extract_data
+ )
+ list_file_systems.metadata = {'url': '/'} # type: ignore
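+
+    # Illustrative sketch (not part of the generated client): the return value is an ItemPaged
+    # driven by the prepare_request/extract_data callbacks above, so a caller can simply iterate
+    # it. `client.service` as the operation-group attribute name is an assumption here:
+    #
+    #     for fs in client.service.list_file_systems(prefix="myfilesystem", max_results=100):
+    #         print(fs.name)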
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/version.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/version.py
new file mode 100644
index 00000000000..1bb78b965c0
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_generated/version.py
@@ -0,0 +1,13 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+VERSION = "2020-06-12"
+
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_list_paths_helper.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_list_paths_helper.py
new file mode 100644
index 00000000000..543e1e11578
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_list_paths_helper.py
@@ -0,0 +1,108 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from azure.core.paging import PageIterator
+from azure.core.exceptions import HttpResponseError
+from ._deserialize import process_storage_error, get_deleted_path_properties_from_generated_code
+from ._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix
+from ._shared.models import DictMixin
+from ._shared.response_handlers import return_context_and_deserialized
+
+
+class DeletedPathPropertiesPaged(PageIterator):
+ """An Iterable of deleted path properties.
+
+ :ivar str service_endpoint: The service URL.
+ :ivar str prefix: A path name prefix being used to filter the list.
+ :ivar str marker: The continuation token of the current page of results.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str continuation_token: The continuation token to retrieve the next page of results.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar current_page: The current page of listed results.
+ :vartype current_page: list(~azure.storage.filedatalake.DeletedPathProperties)
+ :ivar str container: The container that the paths are listed from.
+ :ivar str delimiter: A delimiting character used for hierarchy listing.
+
+ :param callable command: Function to retrieve the next page of items.
+ """
+ def __init__(
+ self, command,
+ container=None,
+ prefix=None,
+ results_per_page=None,
+ continuation_token=None,
+ delimiter=None,
+ location_mode=None):
+ super(DeletedPathPropertiesPaged, self).__init__(
+ get_next=self._get_next_cb,
+ extract_data=self._extract_data_cb,
+ continuation_token=continuation_token or ""
+ )
+ self._command = command
+ self.service_endpoint = None
+ self.prefix = prefix
+ self.marker = None
+ self.results_per_page = results_per_page
+ self.container = container
+ self.delimiter = delimiter
+ self.current_page = None
+ self.location_mode = location_mode
+
+ def _get_next_cb(self, continuation_token):
+ try:
+ return self._command(
+ prefix=self.prefix,
+ marker=continuation_token or None,
+ max_results=self.results_per_page,
+ cls=return_context_and_deserialized,
+ use_location=self.location_mode)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ def _extract_data_cb(self, get_next_return):
+ self.location_mode, self._response = get_next_return
+ self.service_endpoint = self._response.service_endpoint
+ self.prefix = self._response.prefix
+ self.marker = self._response.marker
+ self.results_per_page = self._response.max_results
+ self.container = self._response.container_name
+ self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items
+ self.current_page = [self._build_item(item) for item in self.current_page]
+ self.delimiter = self._response.delimiter
+
+ return self._response.next_marker or None, self.current_page
+
+ def _build_item(self, item):
+ if isinstance(item, BlobItemInternal):
+ file_props = get_deleted_path_properties_from_generated_code(item)
+ file_props.file_system = self.container
+ return file_props
+ if isinstance(item, GenBlobPrefix):
+ return DirectoryPrefix(
+ container=self.container,
+ prefix=item.name,
+ results_per_page=self.results_per_page,
+ location_mode=self.location_mode)
+ return item
+
+
+class DirectoryPrefix(DictMixin):
+ """Directory prefix.
+
+ :ivar str name: Name of the deleted directory.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar str file_system: The file system that the deleted paths are listed from.
+ :ivar str delimiter: A delimiting character used for hierarchy listing.
+ """
+ def __init__(self, **kwargs):
+ self.name = kwargs.get('prefix')
+ self.results_per_page = kwargs.get('results_per_page')
+ self.file_system = kwargs.get('container')
+ self.delimiter = kwargs.get('delimiter')
+ self.location_mode = kwargs.get('location_mode')
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_models.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_models.py
new file mode 100644
index 00000000000..612dc93fe49
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_models.py
@@ -0,0 +1,1028 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+from datetime import datetime
+from enum import Enum
+
+from ...blob import LeaseProperties as BlobLeaseProperties
+from ...blob import AccountSasPermissions as BlobAccountSasPermissions
+from ...blob import ResourceTypes as BlobResourceTypes
+from ...blob import UserDelegationKey as BlobUserDelegationKey
+from ...blob import ContentSettings as BlobContentSettings
+from ...blob import AccessPolicy as BlobAccessPolicy
+from ...blob import DelimitedTextDialect as BlobDelimitedTextDialect
+from ...blob import DelimitedJsonDialect as BlobDelimitedJSON
+from ...blob import ArrowDialect as BlobArrowDialect
+from ...blob._models import ContainerPropertiesPaged
+from ...blob._generated.models import Logging as GenLogging, Metrics as GenMetrics, \
+ RetentionPolicy as GenRetentionPolicy, StaticWebsite as GenStaticWebsite, CorsRule as GenCorsRule
+from ._shared.models import DictMixin
+
+
+class FileSystemProperties(object):
+ """File System properties class.
+
+ :ivar ~datetime.datetime last_modified:
+ A datetime object representing the last time the file system was modified.
+ :ivar str etag:
+ The ETag contains a value that you can use to perform operations
+ conditionally.
+ :ivar ~...filedatalake.LeaseProperties lease:
+ Stores all the lease information for the file system.
+ :ivar str public_access: Specifies whether data in the file system may be accessed
+ publicly and the level of access.
+ :ivar bool has_immutability_policy:
+ Represents whether the file system has an immutability policy.
+ :ivar bool has_legal_hold:
+ Represents whether the file system has a legal hold.
+ :ivar dict metadata: A dict with name-value pairs to associate with the
+ file system as metadata.
+ :ivar bool deleted:
+ Whether this file system was deleted.
+ :ivar str deleted_version:
+ The version of a deleted file system.
+
+ Returned ``FileSystemProperties`` instances expose these values through a
+ dictionary interface, for example: ``file_system_props["last_modified"]``.
+ Additionally, the file system name is available as ``file_system_props["name"]``.
+ """
+
+ def __init__(self):
+ self.name = None
+ self.last_modified = None
+ self.etag = None
+ self.lease = None
+ self.public_access = None
+ self.has_immutability_policy = None
+ self.has_legal_hold = None
+ self.metadata = None
+ self.deleted = None
+ self.deleted_version = None
+
+ @classmethod
+ def _from_generated(cls, generated):
+ props = cls()
+ props.name = generated.name
+ props.last_modified = generated.properties.last_modified
+ props.deleted = generated.deleted
+ props.deleted_version = generated.version
+ props.etag = generated.properties.etag
+ props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access
+ props.public_access = PublicAccess._from_generated( # pylint: disable=protected-access
+ generated.properties.public_access)
+ props.has_immutability_policy = generated.properties.has_immutability_policy
+ props.has_legal_hold = generated.properties.has_legal_hold
+ props.metadata = generated.metadata
+ return props
+
+ @classmethod
+ def _convert_from_container_props(cls, container_properties):
+ container_properties.__class__ = cls
+ container_properties.public_access = PublicAccess._from_generated( # pylint: disable=protected-access
+ container_properties.public_access)
+ container_properties.lease.__class__ = LeaseProperties
+ return container_properties
+
+
+class FileSystemPropertiesPaged(ContainerPropertiesPaged):
+ """An Iterable of File System properties.
+
+ :ivar str service_endpoint: The service URL.
+ :ivar str prefix: A file system name prefix being used to filter the list.
+ :ivar str marker: The continuation token of the current page of results.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str continuation_token: The continuation token to retrieve the next page of results.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar current_page: The current page of listed results.
+ :vartype current_page: list(~...filedatalake.FileSystemProperties)
+
+ :param callable command: Function to retrieve the next page of items.
+ :param str prefix: Filters the results to return only file systems whose names
+ begin with the specified prefix.
+ :param int results_per_page: The maximum number of file system names to retrieve per
+ call.
+ :param str continuation_token: An opaque continuation token.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(FileSystemPropertiesPaged, self).__init__(
+ *args,
+ **kwargs
+ )
+
+ @staticmethod
+ def _build_item(item):
+ return FileSystemProperties._from_generated(item) # pylint: disable=protected-access
+
+
+class DirectoryProperties(DictMixin):
+ """
+ :ivar str name: name of the directory
+ :ivar str etag: The ETag contains a value that you can use to perform operations
+ conditionally.
+    :ivar bool deleted: whether the current directory is marked as deleted
+ :ivar dict metadata: Name-value pairs associated with the directory as metadata.
+ :ivar ~...filedatalake.LeaseProperties lease:
+ Stores all the lease information for the directory.
+ :ivar ~datetime.datetime last_modified:
+ A datetime object representing the last time the directory was modified.
+ :ivar ~datetime.datetime creation_time:
+ Indicates when the directory was created, in UTC.
+ :ivar int remaining_retention_days: The number of days that the directory will be retained
+ before being permanently deleted by the service.
+    :ivar ~...filedatalake.ContentSettings content_settings:
+ """
+
+ def __init__(self, **kwargs):
+ self.name = kwargs.get('name')
+ self.etag = kwargs.get('ETag')
+ self.deleted = False
+ self.metadata = kwargs.get('metadata')
+ self.lease = LeaseProperties(**kwargs)
+ self.last_modified = kwargs.get('Last-Modified')
+ self.creation_time = kwargs.get('x-ms-creation-time')
+ self.deleted_time = None
+ self.remaining_retention_days = None
+
+
+class FileProperties(DictMixin):
+ """
+ :ivar str name: name of the file
+ :ivar str etag: The ETag contains a value that you can use to perform operations
+ conditionally.
+    :ivar bool deleted: whether the current file is marked as deleted
+ :ivar dict metadata: Name-value pairs associated with the file as metadata.
+ :ivar ~...filedatalake.LeaseProperties lease:
+ Stores all the lease information for the file.
+ :ivar ~datetime.datetime last_modified:
+ A datetime object representing the last time the file was modified.
+ :ivar ~datetime.datetime creation_time:
+ Indicates when the file was created, in UTC.
+ :ivar int size: size of the file
+ :ivar int remaining_retention_days: The number of days that the file will be retained
+ before being permanently deleted by the service.
+    :ivar ~...filedatalake.ContentSettings content_settings:
+ """
+
+ def __init__(self, **kwargs):
+ self.name = kwargs.get('name')
+ self.etag = kwargs.get('ETag')
+ self.deleted = False
+ self.metadata = kwargs.get('metadata')
+ self.lease = LeaseProperties(**kwargs)
+ self.last_modified = kwargs.get('Last-Modified')
+ self.creation_time = kwargs.get('x-ms-creation-time')
+ self.size = kwargs.get('Content-Length')
+ self.deleted_time = None
+ self.expiry_time = kwargs.get("x-ms-expiry-time")
+ self.remaining_retention_days = None
+ self.content_settings = ContentSettings(**kwargs)
+
+
+class PathProperties(object):
+ """Path properties listed by get_paths api.
+
+ :ivar str name: the full path for a file or directory.
+ :ivar str owner: The owner of the file or directory.
+    :ivar str group: The owning group of the file or directory.
+ :ivar str permissions: Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :ivar datetime last_modified: A datetime object representing the last time the directory/file was modified.
+    :ivar bool is_directory: whether the path is a directory.
+    :ivar str etag: The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar int content_length: the size of the file, if the path is a file.
+ """
+
+ def __init__(self, **kwargs):
+ self.name = kwargs.pop('name', None)
+ self.owner = kwargs.get('owner', None)
+ self.group = kwargs.get('group', None)
+ self.permissions = kwargs.get('permissions', None)
+ self.last_modified = kwargs.get('last_modified', None)
+ self.is_directory = kwargs.get('is_directory', False)
+ self.etag = kwargs.get('etag', None)
+ self.content_length = kwargs.get('content_length', None)
+
+ @classmethod
+ def _from_generated(cls, generated):
+ path_prop = PathProperties()
+ path_prop.name = generated.name
+ path_prop.owner = generated.owner
+ path_prop.group = generated.group
+ path_prop.permissions = generated.permissions
+ path_prop.last_modified = datetime.strptime(generated.last_modified, "%a, %d %b %Y %H:%M:%S %Z")
+ path_prop.is_directory = bool(generated.is_directory)
+ path_prop.etag = generated.additional_properties.get('etag')
+ path_prop.content_length = generated.content_length
+ return path_prop
+
+
+class LeaseProperties(BlobLeaseProperties):
+ """DataLake Lease Properties.
+
+ :ivar str status:
+ The lease status of the file. Possible values: locked|unlocked
+ :ivar str state:
+ Lease state of the file. Possible values: available|leased|expired|breaking|broken
+ :ivar str duration:
+ When a file is leased, specifies whether the lease is of infinite or fixed duration.
+ """
+
+
+class ContentSettings(BlobContentSettings):
+ """The content settings of a file or directory.
+
+ :ivar str content_type:
+ The content type specified for the file or directory. If no content type was
+ specified, the default content type is application/octet-stream.
+ :ivar str content_encoding:
+ If the content_encoding has previously been set
+ for the file, that value is stored.
+ :ivar str content_language:
+ If the content_language has previously been set
+ for the file, that value is stored.
+ :ivar str content_disposition:
+ content_disposition conveys additional information about how to
+ process the response payload, and also can be used to attach
+ additional metadata. If content_disposition has previously been set
+ for the file, that value is stored.
+ :ivar str cache_control:
+ If the cache_control has previously been set for
+ the file, that value is stored.
+ :ivar bytearray content_md5:
+ If the content_md5 has been set for the file, this response
+ header is stored so that the client can check for message content
+ integrity.
+ :keyword str content_type:
+ The content type specified for the file or directory. If no content type was
+ specified, the default content type is application/octet-stream.
+ :keyword str content_encoding:
+ If the content_encoding has previously been set
+ for the file, that value is stored.
+ :keyword str content_language:
+ If the content_language has previously been set
+ for the file, that value is stored.
+ :keyword str content_disposition:
+ content_disposition conveys additional information about how to
+ process the response payload, and also can be used to attach
+ additional metadata. If content_disposition has previously been set
+ for the file, that value is stored.
+ :keyword str cache_control:
+ If the cache_control has previously been set for
+ the file, that value is stored.
+ :keyword bytearray content_md5:
+ If the content_md5 has been set for the file, this response
+ header is stored so that the client can check for message content
+ integrity.
+ """
+
+ def __init__(
+ self, **kwargs):
+ super(ContentSettings, self).__init__(
+ **kwargs
+ )
+
+
+class AccountSasPermissions(BlobAccountSasPermissions):
+ def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin
+ create=False):
+ super(AccountSasPermissions, self).__init__(
+ read=read, create=create, write=write, list=list,
+ delete=delete
+ )
+
+
+class FileSystemSasPermissions(object):
+ """FileSystemSasPermissions class to be used with the
+ :func:`~...filedatalake.generate_file_system_sas` function.
+
+ :param bool read:
+ Read the content, properties, metadata etc.
+ :param bool write:
+ Create or write content, properties, metadata. Lease the file system.
+ :param bool delete:
+ Delete the file system.
+ :param bool list:
+ List paths in the file system.
+ :keyword bool move:
+ Move any file in the directory to a new location.
+ Note the move operation can optionally be restricted to the child file or directory owner or
+ the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
+ on the parent directory.
+ :keyword bool execute:
+ Get the status (system defined properties) and ACL of any file in the directory.
+ If the caller is the owner, set access control on any file in the directory.
+ :keyword bool manage_ownership:
+ Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
+ within a folder that has the sticky bit set.
+ :keyword bool manage_access_control:
+ Allows the user to set permissions and POSIX ACLs on files and directories.
+ """
+
+ def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin
+ **kwargs):
+ self.read = read
+ self.write = write
+ self.delete = delete
+ self.list = list
+ self.move = kwargs.pop('move', None)
+ self.execute = kwargs.pop('execute', None)
+ self.manage_ownership = kwargs.pop('manage_ownership', None)
+ self.manage_access_control = kwargs.pop('manage_access_control', None)
+ self._str = (('r' if self.read else '') +
+ ('w' if self.write else '') +
+ ('d' if self.delete else '') +
+ ('l' if self.list else '') +
+ ('m' if self.move else '') +
+ ('e' if self.execute else '') +
+ ('o' if self.manage_ownership else '') +
+ ('p' if self.manage_access_control else ''))
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, permission):
+ """Create a FileSystemSasPermissions from a string.
+
+ To specify read, write, or delete permissions you need only to
+ include the first letter of the word in the string. E.g. For read and
+ write permissions, you would provide a string "rw".
+
+ :param str permission: The string which dictates the read, add, create,
+ write, or delete permissions.
+ :return: A FileSystemSasPermissions object
+        :rtype: ~...filedatalake.FileSystemSasPermissions
+ """
+ p_read = 'r' in permission
+ p_write = 'w' in permission
+ p_delete = 'd' in permission
+ p_list = 'l' in permission
+ p_move = 'm' in permission
+ p_execute = 'e' in permission
+ p_manage_ownership = 'o' in permission
+ p_manage_access_control = 'p' in permission
+
+ parsed = cls(read=p_read, write=p_write, delete=p_delete,
+ list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
+ manage_access_control=p_manage_access_control)
+ return parsed
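+
+    # Illustrative sketch: from_string and __str__ round-trip the permission letters in the
+    # order built above, e.g.
+    #
+    #     perms = FileSystemSasPermissions.from_string("rwdl")
+    #     assert perms.read and perms.write and perms.delete and perms.list
+    #     assert str(perms) == "rwdl"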
+
+
+class DirectorySasPermissions(object):
+ """DirectorySasPermissions class to be used with the
+ :func:`~...filedatalake.generate_directory_sas` function.
+
+ :param bool read:
+ Read the content, properties, metadata etc.
+ :param bool create:
+ Create a new directory
+ :param bool write:
+ Create or write content, properties, metadata. Lease the directory.
+ :param bool delete:
+ Delete the directory.
+ :keyword bool list:
+ List any files in the directory. Implies Execute.
+ :keyword bool move:
+ Move any file in the directory to a new location.
+ Note the move operation can optionally be restricted to the child file or directory owner or
+ the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
+ on the parent directory.
+ :keyword bool execute:
+ Get the status (system defined properties) and ACL of any file in the directory.
+ If the caller is the owner, set access control on any file in the directory.
+ :keyword bool manage_ownership:
+ Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
+ within a folder that has the sticky bit set.
+ :keyword bool manage_access_control:
+ Allows the user to set permissions and POSIX ACLs on files and directories.
+ """
+
+ def __init__(self, read=False, create=False, write=False,
+ delete=False, **kwargs):
+ self.read = read
+ self.create = create
+ self.write = write
+ self.delete = delete
+ self.list = kwargs.pop('list', None)
+ self.move = kwargs.pop('move', None)
+ self.execute = kwargs.pop('execute', None)
+ self.manage_ownership = kwargs.pop('manage_ownership', None)
+ self.manage_access_control = kwargs.pop('manage_access_control', None)
+ self._str = (('r' if self.read else '') +
+ ('c' if self.create else '') +
+ ('w' if self.write else '') +
+ ('d' if self.delete else '') +
+ ('l' if self.list else '') +
+ ('m' if self.move else '') +
+ ('e' if self.execute else '') +
+ ('o' if self.manage_ownership else '') +
+ ('p' if self.manage_access_control else ''))
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, permission):
+ """Create a DirectorySasPermissions from a string.
+
+ To specify read, create, write, or delete permissions you need only to
+ include the first letter of the word in the string. E.g. For read and
+ write permissions, you would provide a string "rw".
+
+ :param str permission: The string which dictates the read, add, create,
+ write, or delete permissions.
+ :return: A DirectorySasPermissions object
+ :rtype: ~...filedatalake.DirectorySasPermissions
+ """
+ p_read = 'r' in permission
+ p_create = 'c' in permission
+ p_write = 'w' in permission
+ p_delete = 'd' in permission
+ p_list = 'l' in permission
+ p_move = 'm' in permission
+ p_execute = 'e' in permission
+ p_manage_ownership = 'o' in permission
+ p_manage_access_control = 'p' in permission
+
+ parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete,
+ list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
+ manage_access_control=p_manage_access_control)
+ return parsed
+
+
+class FileSasPermissions(object):
+ """FileSasPermissions class to be used with the
+ :func:`~...filedatalake.generate_file_sas` function.
+
+ :param bool read:
+ Read the content, properties, metadata etc. Use the file as
+ the source of a read operation.
+ :param bool create:
+ Write a new file
+ :param bool write:
+ Create or write content, properties, metadata. Lease the file.
+ :param bool delete:
+ Delete the file.
+ :keyword bool move:
+ Move any file in the directory to a new location.
+ Note the move operation can optionally be restricted to the child file or directory owner or
+ the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
+ on the parent directory.
+ :keyword bool execute:
+ Get the status (system defined properties) and ACL of any file in the directory.
+ If the caller is the owner, set access control on any file in the directory.
+ :keyword bool manage_ownership:
+ Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
+ within a folder that has the sticky bit set.
+ :keyword bool manage_access_control:
+ Allows the user to set permissions and POSIX ACLs on files and directories.
+ """
+
+ def __init__(self, read=False, create=False, write=False, delete=False, **kwargs):
+ self.read = read
+ self.create = create
+ self.write = write
+ self.delete = delete
+ self.move = kwargs.pop('move', None)
+ self.execute = kwargs.pop('execute', None)
+ self.manage_ownership = kwargs.pop('manage_ownership', None)
+ self.manage_access_control = kwargs.pop('manage_access_control', None)
+ self._str = (('r' if self.read else '') +
+ ('c' if self.create else '') +
+ ('w' if self.write else '') +
+ ('d' if self.delete else '') +
+ ('m' if self.move else '') +
+ ('e' if self.execute else '') +
+ ('o' if self.manage_ownership else '') +
+ ('p' if self.manage_access_control else ''))
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, permission):
+ """Create a FileSasPermissions from a string.
+
+ To specify read, write, or delete permissions you need only to
+ include the first letter of the word in the string. E.g. For read and
+ write permissions, you would provide a string "rw".
+
+ :param str permission: The string which dictates the read, add, create,
+ write, or delete permissions.
+ :return: A FileSasPermissions object
+        :rtype: ~...filedatalake.FileSasPermissions
+ """
+ p_read = 'r' in permission
+ p_create = 'c' in permission
+ p_write = 'w' in permission
+ p_delete = 'd' in permission
+ p_move = 'm' in permission
+ p_execute = 'e' in permission
+ p_manage_ownership = 'o' in permission
+ p_manage_access_control = 'p' in permission
+
+ parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete,
+ move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
+ manage_access_control=p_manage_access_control)
+ return parsed
+
+
+class AccessPolicy(BlobAccessPolicy):
+ """Access Policy class used by the set and get access policy methods in each service.
+
+ A stored access policy can specify the start time, expiry time, and
+ permissions for the Shared Access Signatures with which it's associated.
+ Depending on how you want to control access to your resource, you can
+ specify all of these parameters within the stored access policy, and omit
+ them from the URL for the Shared Access Signature. Doing so permits you to
+ modify the associated signature's behavior at any time, as well as to revoke
+ it. Or you can specify one or more of the access policy parameters within
+ the stored access policy, and the others on the URL. Finally, you can
+ specify all of the parameters on the URL. In this case, you can use the
+ stored access policy to revoke the signature, but not to modify its behavior.
+
+ Together the Shared Access Signature and the stored access policy must
+ include all fields required to authenticate the signature. If any required
+ fields are missing, the request will fail. Likewise, if a field is specified
+ both in the Shared Access Signature URL and in the stored access policy, the
+ request will fail with status code 400 (Bad Request).
+
+ :param permission:
+ The permissions associated with the shared access signature. The
+ user is restricted to operations allowed by the permissions.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has been
+ specified in an associated stored access policy.
+ :type permission: str or ~...datalake.FileSystemSasPermissions
+ :param expiry:
+ The time at which the shared access signature becomes invalid.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has
+ been specified in an associated stored access policy. Azure will always
+ convert values to UTC. If a date is passed in without timezone info, it
+ is assumed to be UTC.
+ :type expiry: ~datetime.datetime or str
+ :keyword start:
+ The time at which the shared access signature becomes valid. If
+ omitted, start time for this call is assumed to be the time when the
+ storage service receives the request. Azure will always convert values
+ to UTC. If a date is passed in without timezone info, it is assumed to
+ be UTC.
+ :paramtype start: ~datetime.datetime or str
+ """
+
+ def __init__(self, permission=None, expiry=None, **kwargs):
+ super(AccessPolicy, self).__init__(
+ permission=permission, expiry=expiry, start=kwargs.pop('start', None)
+ )
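+
+    # Illustrative sketch (assumption: consumed by a set-access-policy style API on the file
+    # system client): the policy carries permission/expiry/start so a SAS that references it
+    # can omit those fields, e.g.
+    #
+    #     from datetime import datetime, timedelta
+    #     policy = AccessPolicy(permission=FileSystemSasPermissions(read=True, list=True),
+    #                           expiry=datetime.utcnow() + timedelta(hours=1),
+    #                           start=datetime.utcnow())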
+
+
+class ResourceTypes(BlobResourceTypes):
+ """
+ Specifies the resource types that are accessible with the account SAS.
+
+ :param bool service:
+        Access to service-level APIs (e.g. List File Systems)
+ :param bool file_system:
+ Access to file_system-level APIs (e.g., Create/Delete file system,
+ List Directories/Files)
+ :param bool object:
+ Access to object-level APIs for
+        files (e.g. Create File, etc.)
+ """
+
+ def __init__(self, service=False, file_system=False, object=False # pylint: disable=redefined-builtin
+ ):
+ super(ResourceTypes, self).__init__(service=service, container=file_system, object=object)
+
+
+class UserDelegationKey(BlobUserDelegationKey):
+ """
+ Represents a user delegation key, provided to the user by Azure Storage
+ based on their Azure Active Directory access token.
+
+ The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+
+ :ivar str signed_oid:
+ Object ID of this token.
+ :ivar str signed_tid:
+ Tenant ID of the tenant that issued this token.
+ :ivar str signed_start:
+ The datetime this token becomes valid.
+ :ivar str signed_expiry:
+ The datetime this token expires.
+ :ivar str signed_service:
+ What service this key is valid for.
+ :ivar str signed_version:
+ The version identifier of the REST service that created this token.
+ :ivar str value:
+ The user delegation key.
+ """
+
+ @classmethod
+ def _from_generated(cls, generated):
+ delegation_key = cls()
+ delegation_key.signed_oid = generated.signed_oid
+ delegation_key.signed_tid = generated.signed_tid
+ delegation_key.signed_start = generated.signed_start
+ delegation_key.signed_expiry = generated.signed_expiry
+ delegation_key.signed_service = generated.signed_service
+ delegation_key.signed_version = generated.signed_version
+ delegation_key.value = generated.value
+ return delegation_key
+
+
+class PublicAccess(str, Enum):
+ """
+ Specifies whether data in the file system may be accessed publicly and the level of access.
+ """
+
+ File = 'blob'
+ """
+    Specifies public read access for files. File data within this file system can be read
+ via anonymous request, but file system data is not available. Clients cannot enumerate
+ files within the container via anonymous request.
+ """
+
+ FileSystem = 'container'
+ """
+ Specifies full public read access for file system and file data. Clients can enumerate
+ files within the file system via anonymous request, but cannot enumerate file systems
+ within the storage account.
+ """
+
+ @classmethod
+ def _from_generated(cls, public_access):
+ if public_access == "blob": # pylint:disable=no-else-return
+ return cls.File
+ elif public_access == "container":
+ return cls.FileSystem
+
+ return None
+
+
+class LocationMode(object):
+ """
+ Specifies the location the request should be sent to. This mode only applies
+ for RA-GRS accounts which allow secondary read access. All other account types
+ must use PRIMARY.
+ """
+
+ PRIMARY = 'primary' #: Requests should be sent to the primary location.
+ SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible.
+
+
+class DelimitedJsonDialect(BlobDelimitedJSON):
+ """Defines the input or output JSON serialization for a datalake query.
+
+ :keyword str delimiter: The line separator character, default value is '\n'
+ """
+
+
+class DelimitedTextDialect(BlobDelimitedTextDialect):
+ """Defines the input or output delimited (CSV) serialization for a datalake query request.
+
+ :keyword str delimiter:
+ Column separator, defaults to ','.
+ :keyword str quotechar:
+ Field quote, defaults to '"'.
+ :keyword str lineterminator:
+ Record separator, defaults to '\n'.
+ :keyword str escapechar:
+ Escape char, defaults to empty.
+ :keyword bool has_header:
+ Whether the blob data includes headers in the first line. The default value is False, meaning that the
+ data will be returned inclusive of the first line. If set to True, the data will be returned exclusive
+ of the first line.
+ """
+
+
+class ArrowDialect(BlobArrowDialect):
+ """field of an arrow schema.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param str type: Required.
+ :keyword str name: The name of the field.
+ :keyword int precision: The precision of the field.
+ :keyword int scale: The scale of the field.
+ """
+
+
+class ArrowType(str, Enum):
+
+ INT64 = "int64"
+ BOOL = "bool"
+ TIMESTAMP_MS = "timestamp[ms]"
+ STRING = "string"
+ DOUBLE = "double"
+ DECIMAL = 'decimal'
+
+
+class DataLakeFileQueryError(object):
+ """The error happened during quick query operation.
+
+ :ivar str error:
+ The name of the error.
+ :ivar bool is_fatal:
+ If true, this error prevents further query processing. More result data may be returned,
+ but there is no guarantee that all of the original data will be processed.
+ If false, this error does not prevent further query processing.
+ :ivar str description:
+ A description of the error.
+ :ivar int position:
+ The blob offset at which the error occurred.
+ """
+
+ def __init__(self, error=None, is_fatal=False, description=None, position=None):
+ self.error = error
+ self.is_fatal = is_fatal
+ self.description = description
+ self.position = position
+
+
+class AccessControlChangeCounters(DictMixin):
+ """
+ AccessControlChangeCounters contains counts of operations that change Access Control Lists recursively.
+
+ :ivar int directories_successful:
+ Number of directories where Access Control List has been updated successfully.
+ :ivar int files_successful:
+ Number of files where Access Control List has been updated successfully.
+ :ivar int failure_count:
+ Number of paths where Access Control List update has failed.
+ """
+
+ def __init__(self, directories_successful, files_successful, failure_count):
+ self.directories_successful = directories_successful
+ self.files_successful = files_successful
+ self.failure_count = failure_count
+
+
+class AccessControlChangeResult(DictMixin):
+ """
+ AccessControlChangeResult contains result of operations that change Access Control Lists recursively.
+
+ :ivar ~...filedatalake.AccessControlChangeCounters counters:
+ Contains counts of paths changed from start of the operation.
+ :ivar str continuation:
+ Optional continuation token.
+ Value is present when operation is split into multiple batches and can be used to resume progress.
+ """
+
+ def __init__(self, counters, continuation):
+ self.counters = counters
+ self.continuation = continuation
+
+
+class AccessControlChangeFailure(DictMixin):
+ """
+ Represents an entry that failed to update Access Control List.
+
+ :ivar str name:
+ Name of the entry.
+ :ivar bool is_directory:
+ Indicates whether the entry is a directory.
+ :ivar str error_message:
+ Indicates the reason why the entry failed to update.
+ """
+
+ def __init__(self, name, is_directory, error_message):
+ self.name = name
+ self.is_directory = is_directory
+ self.error_message = error_message
+
+
+class AccessControlChanges(DictMixin):
+ """
+ AccessControlChanges contains batch and cumulative counts of operations
+ that change Access Control Lists recursively.
+ Additionally it exposes path entries that failed to update while these operations progress.
+
+ :ivar ~...filedatalake.AccessControlChangeCounters batch_counters:
+ Contains counts of paths changed within single batch.
+ :ivar ~...filedatalake.AccessControlChangeCounters aggregate_counters:
+ Contains counts of paths changed from start of the operation.
+ :ivar list(~...filedatalake.AccessControlChangeFailure) batch_failures:
+ List of path entries that failed to update Access Control List within single batch.
+ :ivar str continuation:
+ An opaque continuation token that may be used to resume the operations in case of failures.
+ """
+
+ def __init__(self, batch_counters, aggregate_counters, batch_failures, continuation):
+ self.batch_counters = batch_counters
+ self.aggregate_counters = aggregate_counters
+ self.batch_failures = batch_failures
+ self.continuation = continuation
+
+
+class DeletedPathProperties(DictMixin):
+ """
+ Properties populated for a deleted path.
+
+ :ivar str name:
+ The name of the file in the path.
+ :ivar ~datetime.datetime deleted_time:
+ A datetime object representing the time at which the path was deleted.
+ :ivar int remaining_retention_days:
+ The number of days that the path will be retained before being permanently deleted by the service.
+ :ivar str deletion_id:
+ The id associated with the deleted path.
+ """
+ def __init__(self, **kwargs):
+ self.name = kwargs.get('name')
+ self.deleted_time = None
+ self.remaining_retention_days = None
+ self.deletion_id = None
+
+
+class AnalyticsLogging(GenLogging):
+ """Azure Analytics Logging settings.
+
+ :keyword str version:
+ The version of Storage Analytics to configure. The default value is 1.0.
+ :keyword bool delete:
+ Indicates whether all delete requests should be logged. The default value is `False`.
+ :keyword bool read:
+ Indicates whether all read requests should be logged. The default value is `False`.
+ :keyword bool write:
+ Indicates whether all write requests should be logged. The default value is `False`.
+ :keyword ~...filedatalake.RetentionPolicy retention_policy:
+ Determines how long the associated data should persist. If not specified the retention
+ policy will be disabled by default.
+ """
+
+ def __init__(self, **kwargs):
+ self.version = kwargs.get('version', u'1.0')
+ self.delete = kwargs.get('delete', False)
+ self.read = kwargs.get('read', False)
+ self.write = kwargs.get('write', False)
+ self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
+
+ @classmethod
+ def _from_generated(cls, generated):
+ if not generated:
+ return cls()
+ return cls(
+ version=generated.version,
+ delete=generated.delete,
+ read=generated.read,
+ write=generated.write,
+ retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access
+ )
+
+
+class Metrics(GenMetrics):
+ """A summary of request statistics grouped by API in hour or minute aggregates.
+
+ :keyword str version:
+ The version of Storage Analytics to configure. The default value is 1.0.
+ :keyword bool enabled:
+ Indicates whether metrics are enabled for the Datalake service.
+ The default value is `False`.
+ :keyword bool include_apis:
+ Indicates whether metrics should generate summary statistics for called API operations.
+ :keyword ~...filedatalake.RetentionPolicy retention_policy:
+ Determines how long the associated data should persist. If not specified the retention
+ policy will be disabled by default.
+ """
+
+ def __init__(self, **kwargs):
+ self.version = kwargs.get('version', u'1.0')
+ self.enabled = kwargs.get('enabled', False)
+ self.include_apis = kwargs.get('include_apis')
+ self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
+
+ @classmethod
+ def _from_generated(cls, generated):
+ if not generated:
+ return cls()
+ return cls(
+ version=generated.version,
+ enabled=generated.enabled,
+ include_apis=generated.include_apis,
+ retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access
+ )
+
+
+class RetentionPolicy(GenRetentionPolicy):
+ """The retention policy which determines how long the associated data should
+ persist.
+
+ :param bool enabled:
+ Indicates whether a retention policy is enabled for the storage service.
+ The default value is False.
+ :param int days:
+ Indicates the number of days that metrics or logging or
+ soft-deleted data should be retained. All data older than this value will
+ be deleted. If enabled=True, the number of days must be specified.
+ """
+
+ def __init__(self, enabled=False, days=None):
+ super(RetentionPolicy, self).__init__(enabled=enabled, days=days, allow_permanent_delete=None)
+ if self.enabled and (self.days is None):
+ raise ValueError("If policy is enabled, 'days' must be specified.")
+
+ @classmethod
+ def _from_generated(cls, generated):
+ if not generated:
+ return cls()
+ return cls(
+ enabled=generated.enabled,
+ days=generated.days,
+ )
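+
+    # Illustrative sketch: a retention policy matching the README's soft-delete example (a
+    # 5-day delete retention period) would be RetentionPolicy(enabled=True, days=5);
+    # RetentionPolicy(enabled=True) without `days` raises ValueError, per __init__ above.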
+
+
+class StaticWebsite(GenStaticWebsite):
+ """The properties that enable an account to host a static website.
+
+ :keyword bool enabled:
+ Indicates whether this account is hosting a static website.
+ The default value is `False`.
+ :keyword str index_document:
+ The default name of the index page under each directory.
+ :keyword str error_document404_path:
+ The absolute path of the custom 404 page.
+ :keyword str default_index_document_path:
+ Absolute path of the default index page.
+ """
+
+ def __init__(self, **kwargs):
+ self.enabled = kwargs.get('enabled', False)
+ if self.enabled:
+ self.index_document = kwargs.get('index_document')
+ self.error_document404_path = kwargs.get('error_document404_path')
+ self.default_index_document_path = kwargs.get('default_index_document_path')
+ else:
+ self.index_document = None
+ self.error_document404_path = None
+ self.default_index_document_path = None
+
+ @classmethod
+ def _from_generated(cls, generated):
+ if not generated:
+ return cls()
+ return cls(
+ enabled=generated.enabled,
+ index_document=generated.index_document,
+ error_document404_path=generated.error_document404_path,
+ default_index_document_path=generated.default_index_document_path
+ )
+
+
+class CorsRule(GenCorsRule):
+ """CORS is an HTTP feature that enables a web application running under one
+ domain to access resources in another domain. Web browsers implement a
+ security restriction known as same-origin policy that prevents a web page
+ from calling APIs in a different domain; CORS provides a secure way to
+ allow one domain (the origin domain) to call APIs in another domain.
+
+ :param list(str) allowed_origins:
+ A list of origin domains that will be allowed via CORS, or "*" to allow
+        all domains. The list must contain at least one entry. Limited to 64
+ origin domains. Each allowed origin can have up to 256 characters.
+ :param list(str) allowed_methods:
+ A list of HTTP methods that are allowed to be executed by the origin.
+        The list must contain at least one entry. For Azure Storage,
+ permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
+ :keyword list(str) allowed_headers:
+ Defaults to an empty list. A list of headers allowed to be part of
+ the cross-origin request. Limited to 64 defined headers and 2 prefixed
+ headers. Each header can be up to 256 characters.
+ :keyword list(str) exposed_headers:
+ Defaults to an empty list. A list of response headers to expose to CORS
+ clients. Limited to 64 defined headers and two prefixed headers. Each
+ header can be up to 256 characters.
+ :keyword int max_age_in_seconds:
+ The number of seconds that the client/browser should cache a
+ preflight response.
+ """
+
+ def __init__(self, allowed_origins, allowed_methods, **kwargs):
+ self.allowed_origins = ','.join(allowed_origins)
+ self.allowed_methods = ','.join(allowed_methods)
+ self.allowed_headers = ','.join(kwargs.get('allowed_headers', []))
+ self.exposed_headers = ','.join(kwargs.get('exposed_headers', []))
+ self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0)
+
+ @classmethod
+ def _from_generated(cls, generated):
+ return cls(
+ [generated.allowed_origins],
+ [generated.allowed_methods],
+ allowed_headers=[generated.allowed_headers],
+ exposed_headers=[generated.exposed_headers],
+ max_age_in_seconds=generated.max_age_in_seconds,
+ )
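
Note that `CorsRule` flattens its list arguments into comma-separated strings, which is the form the service-properties payload expects. A hypothetical construction is sketched below (it assumes `CorsRule` is imported from the same vendored `_models` module as the previous sketch, and the origins and header names are made up).

```python
# Sketch only: CorsRule joins the lists passed in into comma-separated strings.
rule = CorsRule(
    ["https://www.contoso.com", "https://www.fabrikam.com"],  # allowed_origins
    ["GET", "PUT"],                                           # allowed_methods
    allowed_headers=["x-ms-meta-data*", "x-ms-meta-target*"],
    exposed_headers=["x-ms-meta-*"],
    max_age_in_seconds=500,
)

print(rule.allowed_origins)  # https://www.contoso.com,https://www.fabrikam.com
print(rule.allowed_methods)  # GET,PUT
print(rule.allowed_headers)  # x-ms-meta-data*,x-ms-meta-target*
```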
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_path_client.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_path_client.py
new file mode 100644
index 00000000000..7378ffe9d08
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_path_client.py
@@ -0,0 +1,896 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from datetime import datetime
+from typing import Any, Dict, Union
+
+try:
+ from urllib.parse import urlparse, quote
+except ImportError:
+ from urlparse import urlparse # type: ignore
+ from urllib2 import quote # type: ignore
+
+import six
+
+from azure.core.exceptions import AzureError, HttpResponseError
+from ...blob import BlobClient
+from ._data_lake_lease import DataLakeLeaseClient
+from ._deserialize import process_storage_error
+from ._generated import AzureDataLakeStorageRESTAPI
+from ._models import LocationMode, DirectoryProperties, AccessControlChangeResult, AccessControlChanges, \
+ AccessControlChangeCounters, AccessControlChangeFailure
+from ._serialize import convert_dfs_url_to_blob_url, get_mod_conditions, \
+ get_path_http_headers, add_metadata_headers, get_lease_id, get_source_mod_conditions, get_access_conditions
+from ._shared.base_client import StorageAccountHostsMixin, parse_query
+from ._shared.response_handlers import return_response_headers, return_headers_and_deserialized
+
+_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = (
+ 'The require_encryption flag is set, but encryption is not supported'
+ ' for this method.')
+
+
+class PathClient(StorageAccountHostsMixin):
+ def __init__(
+ self, account_url, # type: str
+ file_system_name, # type: str
+ path_name, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+
+ try:
+ if not account_url.lower().startswith('http'):
+ account_url = "https://" + account_url
+ except AttributeError:
+ raise ValueError("Account URL must be a string.")
+ parsed_url = urlparse(account_url.rstrip('/'))
+
+ # remove the preceding/trailing delimiter from the path components
+ file_system_name = file_system_name.strip('/')
+
+ # the name of root directory is /
+ if path_name != '/':
+ path_name = path_name.strip('/')
+
+ if not (file_system_name and path_name):
+ raise ValueError("Please specify a file system name and file path.")
+ if not parsed_url.netloc:
+ raise ValueError("Invalid URL: {}".format(account_url))
+
+ blob_account_url = convert_dfs_url_to_blob_url(account_url)
+ self._blob_account_url = blob_account_url
+
+ datalake_hosts = kwargs.pop('_hosts', None)
+ blob_hosts = None
+ if datalake_hosts:
+ blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY])
+ blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""}
+ self._blob_client = BlobClient(blob_account_url, file_system_name, path_name,
+ credential=credential, _hosts=blob_hosts, **kwargs)
+
+ _, sas_token = parse_query(parsed_url.query)
+ self.file_system_name = file_system_name
+ self.path_name = path_name
+
+ self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
+
+ super(PathClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential,
+ _hosts=datalake_hosts, **kwargs)
+ # ADLS doesn't support secondary endpoint, make sure it's empty
+ self._hosts[LocationMode.SECONDARY] = ""
+ self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, path=path_name,
+ pipeline=self._pipeline)
+ self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(
+ self._blob_client.url,
+ file_system=file_system_name,
+ path=path_name,
+ pipeline=self._pipeline)
+
+ def __exit__(self, *args):
+ self._blob_client.close()
+ super(PathClient, self).__exit__(*args)
+
+ def close(self):
+ # type: () -> None
+ """ This method is to close the sockets opened by the client.
+ It need not be used when using with a context manager.
+ """
+ self._blob_client.close()
+ self.__exit__()
+
+ def _format_url(self, hostname):
+ file_system_name = self.file_system_name
+ if isinstance(file_system_name, six.text_type):
+ file_system_name = file_system_name.encode('UTF-8')
+ return "{}://{}/{}/{}{}".format(
+ self.scheme,
+ hostname,
+ quote(file_system_name),
+ quote(self.path_name, safe='~'),
+ self._query_str)
+
+ def _create_path_options(self, resource_type, content_settings=None, metadata=None, **kwargs):
+        # type: (str, Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_mod_conditions(kwargs)
+
+ path_http_headers = None
+ if content_settings:
+ path_http_headers = get_path_http_headers(content_settings)
+
+ options = {
+ 'resource': resource_type,
+ 'properties': add_metadata_headers(metadata),
+ 'permissions': kwargs.pop('permissions', None),
+ 'umask': kwargs.pop('umask', None),
+ 'path_http_headers': path_http_headers,
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'timeout': kwargs.pop('timeout', None),
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ def _create(self, resource_type, content_settings=None, metadata=None, **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """
+ Create directory or file
+
+ :param resource_type:
+ Required for Create File and Create Directory.
+ The value must be "file" or "directory". Possible values include:
+ 'directory', 'file'
+ :type resource_type: str
+ :param ~...filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :param metadata:
+ Name-value pairs associated with the file/directory as metadata.
+ :type metadata: dict(str, str)
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~...filedatalake.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :type permissions: str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Dict[str, Union[str, datetime]]
+ """
+ options = self._create_path_options(
+ resource_type,
+ content_settings=content_settings,
+ metadata=metadata,
+ **kwargs)
+ try:
+ return self._client.path.create(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @staticmethod
+ def _delete_path_options(**kwargs):
+        # type: (**Any) -> Dict[str, Any]
+
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_mod_conditions(kwargs)
+
+ options = {
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'cls': return_response_headers,
+ 'timeout': kwargs.pop('timeout', None)}
+ options.update(kwargs)
+ return options
+
+ def _delete(self, **kwargs):
+        # type: (**Any) -> Dict[str, Union[str, datetime]]
+ """
+ Marks the specified path for deletion.
+
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+        :paramtype lease: ~...filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+        :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ options = self._delete_path_options(**kwargs)
+ try:
+ return self._client.path.delete(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @staticmethod
+ def _set_access_control_options(owner=None, group=None, permissions=None, acl=None, **kwargs):
+        # type: (Optional[str], Optional[str], Optional[str], Optional[str], **Any) -> Dict[str, Any]
+
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_mod_conditions(kwargs)
+
+ options = {
+ 'owner': owner,
+ 'group': group,
+ 'permissions': permissions,
+ 'acl': acl,
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'timeout': kwargs.pop('timeout', None),
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ def set_access_control(self, owner=None, # type: Optional[str]
+ group=None, # type: Optional[str]
+ permissions=None, # type: Optional[str]
+ acl=None, # type: Optional[str]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """
+ Set the owner, group, permissions, or access control list for a path.
+
+ :param owner:
+ Optional. The owner of the file or directory.
+ :type owner: str
+ :param group:
+ Optional. The owning group of the file or directory.
+ :type group: str
+ :param permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ permissions and acl are mutually exclusive.
+ :type permissions: str
+ :param acl:
+ Sets POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, a user or
+ group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ permissions and acl are mutually exclusive.
+ :type acl: str
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~...filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+        :returns: response dict (Etag and last modified).
+ """
+ if not any([owner, group, permissions, acl]):
+ raise ValueError("At least one parameter should be set for set_access_control API")
+ options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs)
+ try:
+ return self._client.path.set_access_control(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @staticmethod
+ def _get_access_control_options(upn=None, # type: Optional[bool]
+ **kwargs):
+ # type: (...) -> Dict[str, Any]
+
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_mod_conditions(kwargs)
+
+ options = {
+ 'action': 'getAccessControl',
+ 'upn': upn if upn else False,
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'timeout': kwargs.pop('timeout', None),
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ def get_access_control(self, upn=None, # type: Optional[bool]
+ **kwargs):
+ # type: (...) -> Dict[str, Any]
+ """
+ :param upn: Optional.
+ Valid only when Hierarchical Namespace is
+ enabled for the account. If "true", the user identity values returned
+ in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+ transformed from Azure Active Directory Object IDs to User Principal
+ Names. If "false", the values will be returned as Azure Active
+ Directory Object IDs. The default value is false. Note that group and
+ application Object IDs are not translated because they do not have
+ unique friendly names.
+ :type upn: bool
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~...filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+        :returns: response dict.
+ """
+ options = self._get_access_control_options(upn=upn, **kwargs)
+ try:
+ return self._client.path.get_properties(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @staticmethod
+ def _set_access_control_recursive_options(mode, acl, **kwargs):
+ # type: (str, str, **Any) -> Dict[str, Any]
+
+ options = {
+ 'mode': mode,
+ 'force_flag': kwargs.pop('continue_on_failure', None),
+ 'timeout': kwargs.pop('timeout', None),
+ 'continuation': kwargs.pop('continuation_token', None),
+ 'max_records': kwargs.pop('batch_size', None),
+ 'acl': acl,
+ 'cls': return_headers_and_deserialized}
+ options.update(kwargs)
+ return options
+
+ def set_access_control_recursive(self,
+ acl,
+ **kwargs):
+ # type: (str, **Any) -> AccessControlChangeResult
+ """
+ Sets the Access Control on a path and sub-paths.
+
+ :param acl:
+ Sets POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, a user or
+ group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :keyword func(~...filedatalake.AccessControlChanges) progress_hook:
+ Callback where the caller can track progress of the operation
+ as well as collect paths that failed to change Access Control.
+ :keyword str continuation_token:
+ Optional continuation token that can be used to resume previously stopped operation.
+ :keyword int batch_size:
+ Optional. If data set size exceeds batch size then operation will be split into multiple
+ requests so that progress can be tracked. Batch size should be between 1 and 2000.
+ The default when unspecified is 2000.
+ :keyword int max_batches:
+ Optional. Defines maximum number of batches that single change Access Control operation can execute.
+            If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded and the operation continues to the end.
+ :keyword bool continue_on_failure:
+ If set to False, the operation will terminate quickly on encountering user errors (4XX).
+ If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+ the directory.
+            A continuation token is only returned when continue_on_failure is True and user errors occurred.
+            If not set, the default value is False.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: A summary of the recursive operations, including the count of successes and failures,
+ as well as a continuation token in case the operation was terminated prematurely.
+ :rtype: :class:`~...filedatalake.AccessControlChangeResult`
+ :raises ~azure.core.exceptions.AzureError:
+ User can restart the operation using continuation_token field of AzureError if the token is available.
+ """
+ if not acl:
+ raise ValueError("The Access Control List must be set for this operation")
+
+ progress_hook = kwargs.pop('progress_hook', None)
+ max_batches = kwargs.pop('max_batches', None)
+ options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs)
+ return self._set_access_control_internal(options=options, progress_hook=progress_hook,
+ max_batches=max_batches)
+
+ def update_access_control_recursive(self,
+ acl,
+ **kwargs):
+ # type: (str, **Any) -> AccessControlChangeResult
+ """
+ Modifies the Access Control on a path and sub-paths.
+
+ :param acl:
+ Modifies POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, a user or
+ group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :keyword func(~...filedatalake.AccessControlChanges) progress_hook:
+ Callback where the caller can track progress of the operation
+ as well as collect paths that failed to change Access Control.
+ :keyword str continuation_token:
+ Optional continuation token that can be used to resume previously stopped operation.
+ :keyword int batch_size:
+ Optional. If data set size exceeds batch size then operation will be split into multiple
+ requests so that progress can be tracked. Batch size should be between 1 and 2000.
+ The default when unspecified is 2000.
+ :keyword int max_batches:
+ Optional. Defines maximum number of batches that single change Access Control operation can execute.
+            If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded and the operation continues to the end.
+ :keyword bool continue_on_failure:
+ If set to False, the operation will terminate quickly on encountering user errors (4XX).
+ If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+ the directory.
+            A continuation token is only returned when continue_on_failure is True and user errors occurred.
+            If not set, the default value is False.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: A summary of the recursive operations, including the count of successes and failures,
+ as well as a continuation token in case the operation was terminated prematurely.
+ :rtype: :class:`~...filedatalake.AccessControlChangeResult`
+ :raises ~azure.core.exceptions.AzureError:
+ User can restart the operation using continuation_token field of AzureError if the token is available.
+ """
+ if not acl:
+ raise ValueError("The Access Control List must be set for this operation")
+
+ progress_hook = kwargs.pop('progress_hook', None)
+ max_batches = kwargs.pop('max_batches', None)
+ options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs)
+ return self._set_access_control_internal(options=options, progress_hook=progress_hook,
+ max_batches=max_batches)
+
+ def remove_access_control_recursive(self,
+ acl,
+ **kwargs):
+ # type: (str, **Any) -> AccessControlChangeResult
+ """
+ Removes the Access Control on a path and sub-paths.
+
+ :param acl:
+ Removes POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, and a user or
+ group identifier in the format "[scope:][type]:[id]".
+ :type acl: str
+ :keyword func(~...filedatalake.AccessControlChanges) progress_hook:
+ Callback where the caller can track progress of the operation
+ as well as collect paths that failed to change Access Control.
+ :keyword str continuation_token:
+ Optional continuation token that can be used to resume previously stopped operation.
+ :keyword int batch_size:
+ Optional. If data set size exceeds batch size then operation will be split into multiple
+ requests so that progress can be tracked. Batch size should be between 1 and 2000.
+ The default when unspecified is 2000.
+ :keyword int max_batches:
+ Optional. Defines maximum number of batches that single change Access Control operation can execute.
+            If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded and the operation continues to the end.
+ :keyword bool continue_on_failure:
+ If set to False, the operation will terminate quickly on encountering user errors (4XX).
+ If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+ the directory.
+            A continuation token is only returned when continue_on_failure is True and user errors occurred.
+            If not set, the default value is False.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: A summary of the recursive operations, including the count of successes and failures,
+ as well as a continuation token in case the operation was terminated prematurely.
+ :rtype: :class:`~...filedatalake.AccessControlChangeResult`
+ :raises ~azure.core.exceptions.AzureError:
+ User can restart the operation using continuation_token field of AzureError if the token is available.
+ """
+ if not acl:
+ raise ValueError("The Access Control List must be set for this operation")
+
+ progress_hook = kwargs.pop('progress_hook', None)
+ max_batches = kwargs.pop('max_batches', None)
+ options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs)
+ return self._set_access_control_internal(options=options, progress_hook=progress_hook,
+ max_batches=max_batches)
+
+ def _set_access_control_internal(self, options, progress_hook, max_batches=None):
+ try:
+ continue_on_failure = options.get('force_flag')
+ total_directories_successful = 0
+ total_files_success = 0
+ total_failure_count = 0
+ batch_count = 0
+ last_continuation_token = None
+ current_continuation_token = None
+ continue_operation = True
+ while continue_operation:
+ headers, resp = self._client.path.set_access_control_recursive(**options)
+
+ # make a running tally so that we can report the final results
+ total_directories_successful += resp.directories_successful
+ total_files_success += resp.files_successful
+ total_failure_count += resp.failure_count
+ batch_count += 1
+ current_continuation_token = headers['continuation']
+
+ if current_continuation_token is not None:
+ last_continuation_token = current_continuation_token
+
+ if progress_hook is not None:
+ progress_hook(AccessControlChanges(
+ batch_counters=AccessControlChangeCounters(
+ directories_successful=resp.directories_successful,
+ files_successful=resp.files_successful,
+ failure_count=resp.failure_count,
+ ),
+ aggregate_counters=AccessControlChangeCounters(
+ directories_successful=total_directories_successful,
+ files_successful=total_files_success,
+ failure_count=total_failure_count,
+ ),
+ batch_failures=[AccessControlChangeFailure(
+ name=failure.name,
+ is_directory=failure.type == 'DIRECTORY',
+ error_message=failure.error_message) for failure in resp.failed_entries],
+ continuation=last_continuation_token))
+
+ # update the continuation token, if there are more operations that cannot be completed in a single call
+ max_batches_satisfied = (max_batches is not None and batch_count == max_batches)
+ continue_operation = bool(current_continuation_token) and not max_batches_satisfied
+ options['continuation'] = current_continuation_token
+
+ # currently the service stops on any failure, so we should send back the last continuation token
+ # for the user to retry the failed updates
+ # otherwise we should just return what the service gave us
+ return AccessControlChangeResult(counters=AccessControlChangeCounters(
+ directories_successful=total_directories_successful,
+ files_successful=total_files_success,
+ failure_count=total_failure_count),
+ continuation=last_continuation_token
+ if total_failure_count > 0 and not continue_on_failure else current_continuation_token)
+ except HttpResponseError as error:
+ error.continuation_token = last_continuation_token
+ process_storage_error(error)
+ except AzureError as error:
+ error.continuation_token = last_continuation_token
+ raise error
+
+ def _rename_path_options(self, rename_source, content_settings=None, metadata=None, **kwargs):
+ # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+ if metadata or kwargs.pop('permissions', None) or kwargs.pop('umask', None):
+ raise ValueError("metadata, permissions, umask is not supported for this operation")
+
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ source_lease_id = get_lease_id(kwargs.pop('source_lease', None))
+ mod_conditions = get_mod_conditions(kwargs)
+ source_mod_conditions = get_source_mod_conditions(kwargs)
+
+ path_http_headers = None
+ if content_settings:
+ path_http_headers = get_path_http_headers(content_settings)
+
+ options = {
+ 'rename_source': rename_source,
+ 'path_http_headers': path_http_headers,
+ 'lease_access_conditions': access_conditions,
+ 'source_lease_id': source_lease_id,
+ 'modified_access_conditions': mod_conditions,
+ 'source_modified_access_conditions': source_mod_conditions,
+ 'timeout': kwargs.pop('timeout', None),
+ 'mode': 'legacy',
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ def _rename_path(self, rename_source, **kwargs):
+ # type: (str, **Any) -> Dict[str, Any]
+ """
+ Rename directory or file
+
+ :param rename_source:
+ The value must have the following format: "/{filesystem}/{path}".
+ :type rename_source: str
+ :keyword ~...filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword source_lease:
+ A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+ match.
+ :paramtype source_lease: ~...filedatalake.DataLakeLeaseClient or str
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~...filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ """
+ options = self._rename_path_options(
+ rename_source,
+ **kwargs)
+ try:
+ return self._client.path.create(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ def _get_path_properties(self, **kwargs):
+ # type: (**Any) -> Union[FileProperties, DirectoryProperties]
+ """Returns all user-defined metadata, standard HTTP properties, and
+ system properties for the file or directory. It does not return the content of the directory or file.
+
+ :keyword lease:
+ Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~...filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: DirectoryProperties or FileProperties
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../tests/test_blob_samples_common.py
+ :start-after: [START get_blob_properties]
+ :end-before: [END get_blob_properties]
+ :language: python
+ :dedent: 8
+ :caption: Getting the properties for a file/directory.
+ """
+ path_properties = self._blob_client.get_blob_properties(**kwargs)
+ return path_properties
+
+ def _exists(self, **kwargs):
+ # type: (**Any) -> bool
+ """
+ Returns True if a path exists and returns False otherwise.
+
+        :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: boolean
+ """
+ return self._blob_client.exists(**kwargs)
+
+ def set_metadata(self, metadata, # type: Dict[str, str]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets one or more user-defined name-value pairs for the specified
+ file system. Each call to this operation replaces all existing metadata
+ attached to the file system. To remove all metadata from the file system,
+ call this operation with no metadata dict.
+
+ :param metadata:
+            A dict containing name-value pairs to associate with the file or directory as
+ metadata. Example: {'category':'test'}
+ :type metadata: dict[str, str]
+ :keyword lease:
+            If specified, set_metadata only succeeds if the
+            file's or directory's lease is active and matches this ID.
+ :paramtype lease: ~...filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+        :returns: file/directory-updated property dict (Etag and last modified).
+ """
+ return self._blob_client.set_blob_metadata(metadata=metadata, **kwargs)
+
+ def set_http_headers(self, content_settings=None, # type: Optional[ContentSettings]
+ **kwargs):
+ # type: (...) -> Dict[str, Any]
+ """Sets system properties on the file or directory.
+
+        If one property is set for the content_settings, all properties will be overridden.
+
+ :param ~...filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set file/directory properties.
+ :keyword lease:
+            If specified, set_http_headers only succeeds if the
+            file's or directory's lease is active and matches this ID.
+ :paramtype lease: ~...filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: file/directory-updated property dict (Etag and last modified)
+ :rtype: Dict[str, Any]
+ """
+ return self._blob_client.set_http_headers(content_settings=content_settings, **kwargs)
+
+ def acquire_lease(self, lease_duration=-1, # type: Optional[int]
+ lease_id=None, # type: Optional[str]
+ **kwargs):
+ # type: (...) -> DataLakeLeaseClient
+ """
+ Requests a new lease. If the file or directory does not have an active lease,
+ the DataLake service creates a lease on the file/directory and returns a new
+ lease ID.
+
+ :param int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires. A non-infinite lease can be
+ between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change. Default is -1 (infinite lease).
+ :param str lease_id:
+ Proposed lease ID, in a GUID string format. The DataLake service returns
+ 400 (Invalid request) if the proposed lease ID is not in the correct format.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A DataLakeLeaseClient object, that can be run in a context manager.
+ :rtype: ~...filedatalake.DataLakeLeaseClient
+ """
+ lease = DataLakeLeaseClient(self, lease_id=lease_id) # type: ignore
+ lease.acquire(lease_duration=lease_duration, **kwargs)
+ return lease
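
To make the batching and continuation semantics of `set_access_control_recursive` concrete, here is a minimal sketch of a caller driving the operation with a progress hook and resuming from a continuation token. It assumes the package exposes a `DataLakeDirectoryClient` with the usual `(account_url, file_system_name, directory_name, credential)` constructor that inherits the `PathClient` methods above; the account, credential, ACL string, and batch limits are placeholders.

```python
# Sketch only: 'DataLakeDirectoryClient' and its constructor signature are assumptions
# based on the public file-datalake client; the PathClient methods used are from this diff.
from azure.core.exceptions import AzureError

directory_client = DataLakeDirectoryClient(
    "https://myadls.dfs.core.windows.net", "myfilesystem", "mydir",
    credential="<account-key>")

def on_progress(changes):
    # 'changes' is an AccessControlChanges object: per-batch and aggregate counters
    # plus any entries that failed in the current batch.
    done = (changes.aggregate_counters.directories_successful
            + changes.aggregate_counters.files_successful)
    print("processed so far:", done)
    for failure in changes.batch_failures:
        print("failed:", failure.name, failure.error_message)

acl = "user::rwx,group::r-x,other::---"

try:
    result = directory_client.set_access_control_recursive(
        acl=acl, progress_hook=on_progress, batch_size=2000, max_batches=10)
    if result.continuation:
        # max_batches was hit before every sub-path was processed; resume later.
        directory_client.set_access_control_recursive(
            acl=acl, continuation_token=result.continuation)
except AzureError as error:
    # On failure the last continuation token (if any) is attached to the error.
    token = getattr(error, "continuation_token", None)
    if token:
        directory_client.set_access_control_recursive(acl=acl, continuation_token=token)
```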
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_quick_query_helper.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_quick_query_helper.py
new file mode 100644
index 00000000000..ff67d277a16
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_quick_query_helper.py
@@ -0,0 +1,71 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import Union, Iterable, IO # pylint: disable=unused-import
+
+
+class DataLakeFileQueryReader(object): # pylint: disable=too-many-instance-attributes
+ """A streaming object to read query results.
+
+ :ivar str name:
+        The name of the file being queried.
+    :ivar str file_system:
+        The name of the file system where the file is.
+ :ivar dict response_headers:
+ The response_headers of the quick query request.
+ :ivar bytes record_delimiter:
+        The delimiter used to separate lines, or records, within the data. The `records`
+ method will return these lines via a generator.
+ """
+
+ def __init__(
+ self,
+ blob_query_reader
+ ):
+ self.name = blob_query_reader.name
+ self.file_system = blob_query_reader.container
+ self.response_headers = blob_query_reader.response_headers
+ self.record_delimiter = blob_query_reader.record_delimiter
+ self._bytes_processed = 0
+ self._blob_query_reader = blob_query_reader
+
+ def __len__(self):
+ return len(self._blob_query_reader)
+
+ def readall(self):
+ # type: () -> Union[bytes, str]
+ """Return all query results.
+
+ This operation is blocking until all data is downloaded.
+        If encoding has been configured, it will be used to decode individual
+        records as they are received.
+
+ :rtype: Union[bytes, str]
+ """
+ return self._blob_query_reader.readall()
+
+ def readinto(self, stream):
+ # type: (IO) -> None
+ """Download the query result to a stream.
+
+ :param stream:
+ The stream to download to. This can be an open file-handle,
+ or any writable stream.
+ :returns: None
+ """
+        self._blob_query_reader.readinto(stream)
+
+ def records(self):
+ # type: () -> Iterable[Union[bytes, str]]
+ """Returns a record generator for the query result.
+
+ Records will be returned line by line.
+        If encoding has been configured, it will be used to decode individual
+        records as they are received.
+
+ :rtype: Iterable[Union[bytes, str]]
+ """
+ return self._blob_query_reader.records()
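
The reader above is a thin wrapper over the blob quick-query reader, so consuming it looks the same as on the blob side. A minimal sketch follows; the `file_client.query_file(...)` entry point that returns this reader is an assumption based on the public file-datalake API, and the query text is illustrative.

```python
# Sketch only: assumes 'file_client' is a DataLakeFileClient-style object whose
# query_file(...) call returns the DataLakeFileQueryReader defined above.
reader = file_client.query_file("SELECT * from DataLakeStorage")

print(reader.name, reader.file_system)  # path name and file system of the queried file

# Stream the result record by record (split on reader.record_delimiter) ...
for record in reader.records():
    print(record)

# ... or, alternatively, pull the whole result in one call:
# data = reader.readall()
# or download it into any writable stream:
# with open("result.csv", "wb") as stream:
#     reader.readinto(stream)
```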
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_serialize.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_serialize.py
new file mode 100644
index 00000000000..16b6b911caa
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_serialize.py
@@ -0,0 +1,89 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from ...blob._serialize import _get_match_headers # pylint: disable=protected-access
+from ._shared import encode_base64
+from ._generated.models import ModifiedAccessConditions, PathHTTPHeaders, \
+ SourceModifiedAccessConditions, LeaseAccessConditions
+
+
+def convert_dfs_url_to_blob_url(dfs_account_url):
+ return dfs_account_url.replace('.dfs.', '.blob.', 1)
+
+
+def convert_datetime_to_rfc1123(date):
+ weekday = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][date.weekday()]
+ month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep",
+ "Oct", "Nov", "Dec"][date.month - 1]
+ return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (weekday, date.day, month,
+ date.year, date.hour, date.minute, date.second)
+
+
+def add_metadata_headers(metadata=None):
+ # type: (Optional[Dict[str, str]]) -> str
+ headers = list()
+ if metadata:
+ for key, value in metadata.items():
+ headers.append(key + '=')
+ headers.append(encode_base64(value))
+ headers.append(',')
+
+ if headers:
+ del headers[-1]
+
+ return ''.join(headers)
+
+
+def get_mod_conditions(kwargs):
+ # type: (Dict[str, Any]) -> ModifiedAccessConditions
+ if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag')
+ return ModifiedAccessConditions(
+ if_modified_since=kwargs.pop('if_modified_since', None),
+ if_unmodified_since=kwargs.pop('if_unmodified_since', None),
+ if_match=if_match or kwargs.pop('if_match', None),
+ if_none_match=if_none_match or kwargs.pop('if_none_match', None)
+ )
+
+
+def get_source_mod_conditions(kwargs):
+ # type: (Dict[str, Any]) -> SourceModifiedAccessConditions
+ if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag')
+ return SourceModifiedAccessConditions(
+ source_if_modified_since=kwargs.pop('source_if_modified_since', None),
+ source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None),
+ source_if_match=if_match or kwargs.pop('source_if_match', None),
+ source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None)
+ )
+
+
+def get_path_http_headers(content_settings):
+ path_headers = PathHTTPHeaders(
+ cache_control=content_settings.cache_control,
+ content_type=content_settings.content_type,
+ content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+ content_encoding=content_settings.content_encoding,
+ content_language=content_settings.content_language,
+ content_disposition=content_settings.content_disposition
+ )
+ return path_headers
+
+
+def get_access_conditions(lease):
+ # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None]
+ try:
+ lease_id = lease.id # type: ignore
+ except AttributeError:
+ lease_id = lease # type: ignore
+ return LeaseAccessConditions(lease_id=lease_id) if lease_id else None
+
+
+def get_lease_id(lease):
+ if not lease:
+ return ""
+ try:
+ lease_id = lease.id
+ except AttributeError:
+ lease_id = lease
+ return lease_id
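
A quick sketch of what the helpers above produce, since their output formats (blob endpoint rewriting, RFC 1123 dates, base64-encoded metadata values) feed directly into request URLs and headers. The account name and metadata values are made up, and the import path assumes the vendored layout of this PR.

```python
# Sketch only: import path assumes the vendored module layout of this PR.
from datetime import datetime

from azext_storage_preview.vendored_sdks.azure_storage_filedatalake.v2020_06_12._serialize import (
    add_metadata_headers,
    convert_datetime_to_rfc1123,
    convert_dfs_url_to_blob_url,
)

# The dfs endpoint is rewritten to its blob twin for blob-backed operations.
print(convert_dfs_url_to_blob_url("https://myadls.dfs.core.windows.net"))
# https://myadls.blob.core.windows.net

# Dates are rendered in RFC 1123 format for request headers.
print(convert_datetime_to_rfc1123(datetime(2021, 5, 20, 8, 30, 0)))
# Thu, 20 May 2021 08:30:00 GMT

# Metadata values are base64-encoded and joined as "key=value" pairs.
print(add_metadata_headers({"category": "test"}))
# category=dGVzdA==
```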
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/__init__.py
new file mode 100644
index 00000000000..160f8822382
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/__init__.py
@@ -0,0 +1,56 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import hmac
+
+try:
+ from urllib.parse import quote, unquote
+except ImportError:
+ from urllib2 import quote, unquote # type: ignore
+
+import six
+
+
+def url_quote(url):
+ return quote(url)
+
+
+def url_unquote(url):
+ return unquote(url)
+
+
+def encode_base64(data):
+ if isinstance(data, six.text_type):
+ data = data.encode('utf-8')
+ encoded = base64.b64encode(data)
+ return encoded.decode('utf-8')
+
+
+def decode_base64_to_bytes(data):
+ if isinstance(data, six.text_type):
+ data = data.encode('utf-8')
+ return base64.b64decode(data)
+
+
+def decode_base64_to_text(data):
+ decoded_bytes = decode_base64_to_bytes(data)
+ return decoded_bytes.decode('utf-8')
+
+
+def sign_string(key, string_to_sign, key_is_base64=True):
+ if key_is_base64:
+ key = decode_base64_to_bytes(key)
+ else:
+ if isinstance(key, six.text_type):
+ key = key.encode('utf-8')
+ if isinstance(string_to_sign, six.text_type):
+ string_to_sign = string_to_sign.encode('utf-8')
+ signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
+ digest = signed_hmac_sha256.digest()
+ encoded_digest = encode_base64(digest)
+ return encoded_digest
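
The signing helper is simply HMAC-SHA256 over the string-to-sign with the (base64-decoded) account key, re-encoded as base64. The standalone check below reproduces that with the standard library only; the key and string-to-sign are dummy values.

```python
# Standalone sketch of what sign_string(key, string_to_sign) computes; values are dummies.
import base64
import hashlib
import hmac

account_key = base64.b64encode(b"not-a-real-account-key").decode("utf-8")
string_to_sign = "GET\n\n\n\nx-ms-date:Thu, 20 May 2021 08:30:00 GMT\n/myadls/myfilesystem"

decoded_key = base64.b64decode(account_key)
digest = hmac.new(decoded_key, string_to_sign.encode("utf-8"), hashlib.sha256).digest()
signature = base64.b64encode(digest).decode("utf-8")

# sign_string(account_key, string_to_sign) in the module above is expected to return
# the same value when key_is_base64 is left at its default of True.
print(signature)
```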
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/authentication.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/authentication.py
new file mode 100644
index 00000000000..d04c1e4fb53
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/authentication.py
@@ -0,0 +1,142 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import logging
+import sys
+
+try:
+ from urllib.parse import urlparse, unquote
+except ImportError:
+ from urlparse import urlparse # type: ignore
+ from urllib2 import unquote # type: ignore
+
+try:
+ from yarl import URL
+except ImportError:
+ pass
+
+try:
+ from azure.core.pipeline.transport import AioHttpTransport
+except ImportError:
+ AioHttpTransport = None
+
+from azure.core.exceptions import ClientAuthenticationError
+from azure.core.pipeline.policies import SansIOHTTPPolicy
+
+from . import sign_string
+
+
+logger = logging.getLogger(__name__)
+
+
+
+# wraps a given exception with the desired exception type
+def _wrap_exception(ex, desired_type):
+ msg = ""
+ if ex.args:
+ msg = ex.args[0]
+ if sys.version_info >= (3,):
+ # Automatic chaining in Python 3 means we keep the trace
+ return desired_type(msg)
+ # There isn't a good solution in 2 for keeping the stack trace
+ # in general, or that will not result in an error in 3
+ # However, we can keep the previous error type and message
+ # TODO: In the future we will log the trace
+ return desired_type('{}: {}'.format(ex.__class__.__name__, msg))
+
+
+class AzureSigningError(ClientAuthenticationError):
+ """
+ Represents a fatal error when attempting to sign a request.
+ In general, the cause of this exception is user error. For example, the given account key is not valid.
+ Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info.
+ """
+
+
+# pylint: disable=no-self-use
+class SharedKeyCredentialPolicy(SansIOHTTPPolicy):
+
+ def __init__(self, account_name, account_key):
+ self.account_name = account_name
+ self.account_key = account_key
+ super(SharedKeyCredentialPolicy, self).__init__()
+
+ @staticmethod
+ def _get_headers(request, headers_to_sign):
+ headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value)
+ if 'content-length' in headers and headers['content-length'] == '0':
+ del headers['content-length']
+ return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
+
+ @staticmethod
+ def _get_verb(request):
+ return request.http_request.method + '\n'
+
+ def _get_canonicalized_resource(self, request):
+ uri_path = urlparse(request.http_request.url).path
+ try:
+ if isinstance(request.context.transport, AioHttpTransport) or \
+ isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \
+ isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None),
+ AioHttpTransport):
+ uri_path = URL(uri_path)
+ return '/' + self.account_name + str(uri_path)
+ except TypeError:
+ pass
+ return '/' + self.account_name + uri_path
+
+ @staticmethod
+ def _get_canonicalized_headers(request):
+ string_to_sign = ''
+ x_ms_headers = []
+ for name, value in request.http_request.headers.items():
+ if name.startswith('x-ms-'):
+ x_ms_headers.append((name.lower(), value))
+ x_ms_headers.sort()
+ for name, value in x_ms_headers:
+ if value is not None:
+ string_to_sign += ''.join([name, ':', value, '\n'])
+ return string_to_sign
+
+ @staticmethod
+ def _get_canonicalized_resource_query(request):
+ sorted_queries = list(request.http_request.query.items())
+ sorted_queries.sort()
+
+ string_to_sign = ''
+ for name, value in sorted_queries:
+ if value is not None:
+ string_to_sign += '\n' + name.lower() + ':' + unquote(value)
+
+ return string_to_sign
+
+ def _add_authorization_header(self, request, string_to_sign):
+ try:
+ signature = sign_string(self.account_key, string_to_sign)
+ auth_string = 'SharedKey ' + self.account_name + ':' + signature
+ request.http_request.headers['Authorization'] = auth_string
+ except Exception as ex:
+ # Wrap any error that occurred as signing error
+ # Doing so will clarify/locate the source of problem
+ raise _wrap_exception(ex, AzureSigningError)
+
+ def on_request(self, request):
+ string_to_sign = \
+ self._get_verb(request) + \
+ self._get_headers(
+ request,
+ [
+ 'content-encoding', 'content-language', 'content-length',
+ 'content-md5', 'content-type', 'date', 'if-modified-since',
+ 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
+ ]
+ ) + \
+ self._get_canonicalized_headers(request) + \
+ self._get_canonicalized_resource(request) + \
+ self._get_canonicalized_resource_query(request)
+
+ self._add_authorization_header(request, string_to_sign)
+        # logger.debug("String_to_sign=%s", string_to_sign)
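+
+# Illustrative sketch (not part of the upstream SDK): the shared key scheme signs the
+# canonicalized string-to-sign with HMAC-SHA256 of the base64-decoded account key and
+# base64-encodes the digest, which is what the imported `sign_string` helper is assumed
+# to do. A minimal standalone equivalent, using made-up values, would be:
+#
+#     import base64, hashlib, hmac
+#
+#     def _sign(account_key, string_to_sign):
+#         key = base64.b64decode(account_key)
+#         digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
+#         return base64.b64encode(digest).decode('utf-8')
+#
+#     # request.headers['Authorization'] = 'SharedKey myaccount:' + _sign(key, string_to_sign)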
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/base_client.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/base_client.py
new file mode 100644
index 00000000000..5e524b2dc3d
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/base_client.py
@@ -0,0 +1,459 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import logging
+import uuid
+from typing import ( # pylint: disable=unused-import
+ Optional,
+ Any,
+ Tuple,
+)
+
+try:
+ from urllib.parse import parse_qs, quote
+except ImportError:
+ from urlparse import parse_qs # type: ignore
+ from urllib2 import quote # type: ignore
+
+import six
+
+from azure.core.configuration import Configuration
+from azure.core.credentials import AzureSasCredential
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline import Pipeline
+from azure.core.pipeline.transport import RequestsTransport, HttpTransport
+from azure.core.pipeline.policies import (
+ RedirectPolicy,
+ ContentDecodePolicy,
+ BearerTokenCredentialPolicy,
+ ProxyPolicy,
+ DistributedTracingPolicy,
+ HttpLoggingPolicy,
+ UserAgentPolicy,
+ AzureSasCredentialPolicy
+)
+
+from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT
+from .models import LocationMode
+from .authentication import SharedKeyCredentialPolicy
+from .shared_access_signature import QueryStringConstants
+from .request_handlers import serialize_batch_body, _get_batch_request_delimiter
+from .policies import (
+ StorageHeadersPolicy,
+ StorageContentValidation,
+ StorageRequestHook,
+ StorageResponseHook,
+ StorageLoggingPolicy,
+ StorageHosts,
+ QueueMessagePolicy,
+ ExponentialRetry,
+)
+from .._version import VERSION
+from .response_handlers import process_storage_error, PartialBatchErrorException
+
+
+_LOGGER = logging.getLogger(__name__)
+_SERVICE_PARAMS = {
+ "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"},
+ "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"},
+ "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"},
+ "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"},
+}
+
+class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes
+ def __init__(
+ self,
+ parsed_url, # type: Any
+ service, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY)
+ self._hosts = kwargs.get("_hosts")
+ self.scheme = parsed_url.scheme
+
+ if service not in ["blob", "queue", "file-share", "dfs"]:
+ raise ValueError("Invalid service: {}".format(service))
+ service_name = service.split('-')[0]
+ account = parsed_url.netloc.split(".{}.core.".format(service_name))
+
+ self.account_name = account[0] if len(account) > 1 else None
+ if not self.account_name and parsed_url.netloc.startswith("localhost") \
+ or parsed_url.netloc.startswith("127.0.0.1"):
+ self.account_name = parsed_url.path.strip("/")
+
+ self.credential = _format_shared_key_credential(self.account_name, credential)
+ if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"):
+ raise ValueError("Token credential is only supported with HTTPS.")
+
+ secondary_hostname = None
+ if hasattr(self.credential, "account_name"):
+ self.account_name = self.credential.account_name
+ secondary_hostname = "{}-secondary.{}.{}".format(
+ self.credential.account_name, service_name, SERVICE_HOST_BASE)
+
+ if not self._hosts:
+ if len(account) > 1:
+ secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary")
+ if kwargs.get("secondary_hostname"):
+ secondary_hostname = kwargs["secondary_hostname"]
+ primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/')
+ self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname}
+
+ self.require_encryption = kwargs.get("require_encryption", False)
+ self.key_encryption_key = kwargs.get("key_encryption_key")
+ self.key_resolver_function = kwargs.get("key_resolver_function")
+ self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs)
+
+ def __enter__(self):
+ self._client.__enter__()
+ return self
+
+ def __exit__(self, *args):
+ self._client.__exit__(*args)
+
+ def close(self):
+ """ This method is to close the sockets opened by the client.
+ It need not be used when using with a context manager.
+ """
+ self._client.close()
+
+ @property
+ def url(self):
+ """The full endpoint URL to this entity, including SAS token if used.
+
+ This could be either the primary endpoint,
+ or the secondary endpoint depending on the current :func:`location_mode`.
+ """
+ return self._format_url(self._hosts[self._location_mode])
+
+ @property
+ def primary_endpoint(self):
+ """The full primary endpoint URL.
+
+ :type: str
+ """
+ return self._format_url(self._hosts[LocationMode.PRIMARY])
+
+ @property
+ def primary_hostname(self):
+ """The hostname of the primary endpoint.
+
+ :type: str
+ """
+ return self._hosts[LocationMode.PRIMARY]
+
+ @property
+ def secondary_endpoint(self):
+ """The full secondary endpoint URL if configured.
+
+ If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
+ `secondary_hostname` keyword argument on instantiation.
+
+ :type: str
+ :raise ValueError:
+ """
+ if not self._hosts[LocationMode.SECONDARY]:
+ raise ValueError("No secondary host configured.")
+ return self._format_url(self._hosts[LocationMode.SECONDARY])
+
+ @property
+ def secondary_hostname(self):
+ """The hostname of the secondary endpoint.
+
+ If not available this will be None. To explicitly specify a secondary hostname, use the optional
+ `secondary_hostname` keyword argument on instantiation.
+
+ :type: str or None
+ """
+ return self._hosts[LocationMode.SECONDARY]
+
+ @property
+ def location_mode(self):
+ """The location mode that the client is currently using.
+
+ By default this will be "primary". Options include "primary" and "secondary".
+
+ :type: str
+ """
+
+ return self._location_mode
+
+ @location_mode.setter
+ def location_mode(self, value):
+ if self._hosts.get(value):
+ self._location_mode = value
+ self._client._config.url = self.url # pylint: disable=protected-access
+ else:
+ raise ValueError("No host URL for location mode: {}".format(value))
+
+ @property
+ def api_version(self):
+ """The version of the Storage API used for requests.
+
+ :type: str
+ """
+ return self._client._config.version # pylint: disable=protected-access
+
+ def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None):
+ query_str = "?"
+ if snapshot:
+ query_str += "snapshot={}&".format(self.snapshot)
+ if share_snapshot:
+ query_str += "sharesnapshot={}&".format(self.snapshot)
+ if sas_token and isinstance(credential, AzureSasCredential):
+ raise ValueError(
+ "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.")
+ if sas_token and not credential:
+ query_str += sas_token
+ elif is_credential_sastoken(credential):
+ query_str += credential.lstrip("?")
+ credential = None
+ return query_str.rstrip("?&"), credential
+
+ def _create_pipeline(self, credential, **kwargs):
+ # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+ self._credential_policy = None
+ if hasattr(credential, "get_token"):
+ self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+ elif isinstance(credential, SharedKeyCredentialPolicy):
+ self._credential_policy = credential
+ elif isinstance(credential, AzureSasCredential):
+ self._credential_policy = AzureSasCredentialPolicy(credential)
+ elif credential is not None:
+ raise TypeError("Unsupported credential: {}".format(credential))
+
+ config = kwargs.get("_configuration") or create_configuration(**kwargs)
+ if kwargs.get("_pipeline"):
+ return config, kwargs["_pipeline"]
+ config.transport = kwargs.get("transport") # type: ignore
+ kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+ kwargs.setdefault("read_timeout", READ_TIMEOUT)
+ if not config.transport:
+ config.transport = RequestsTransport(**kwargs)
+ policies = [
+ QueueMessagePolicy(),
+ config.proxy_policy,
+ config.user_agent_policy,
+ StorageContentValidation(),
+ ContentDecodePolicy(response_encoding="utf-8"),
+ RedirectPolicy(**kwargs),
+ StorageHosts(hosts=self._hosts, **kwargs),
+ config.retry_policy,
+ config.headers_policy,
+ StorageRequestHook(**kwargs),
+ self._credential_policy,
+ config.logging_policy,
+ StorageResponseHook(**kwargs),
+ DistributedTracingPolicy(**kwargs),
+ HttpLoggingPolicy(**kwargs)
+ ]
+ if kwargs.get("_additional_pipeline_policies"):
+ policies = policies + kwargs.get("_additional_pipeline_policies")
+ return config, Pipeline(config.transport, policies=policies)
+
+ def _batch_send(
+ self,
+ *reqs, # type: HttpRequest
+ **kwargs
+ ):
+ """Given a series of request, do a Storage batch call.
+ """
+ # Pop it here, so requests doesn't feel bad about additional kwarg
+ raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+ batch_id = str(uuid.uuid1())
+
+ request = self._client._client.post( # pylint: disable=protected-access
+ url='{}://{}/{}?{}comp=batch{}{}'.format(
+ self.scheme,
+ self.primary_hostname,
+ kwargs.pop('path', ""),
+ kwargs.pop('restype', ""),
+ kwargs.pop('sas', ""),
+ kwargs.pop('timeout', "")
+ ),
+ headers={
+ 'x-ms-version': self.api_version,
+ "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False)
+ }
+ )
+
+ policies = [StorageHeadersPolicy()]
+ if self._credential_policy:
+ policies.append(self._credential_policy)
+
+ request.set_multipart_mixed(
+ *reqs,
+ policies=policies,
+ enforce_https=False
+ )
+
+ Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access
+ body = serialize_batch_body(request.multipart_mixed_info[0], batch_id)
+ request.set_bytes_body(body)
+
+ temp = request.multipart_mixed_info
+ request.multipart_mixed_info = None
+ pipeline_response = self._pipeline.run(
+ request, **kwargs
+ )
+ response = pipeline_response.http_response
+ request.multipart_mixed_info = temp
+
+ try:
+ if response.status_code not in [202]:
+ raise HttpResponseError(response=response)
+ parts = response.parts()
+ if raise_on_any_failure:
+ parts = list(response.parts())
+ if any(p for p in parts if not 200 <= p.status_code < 300):
+ error = PartialBatchErrorException(
+ message="There is a partial failure in the batch operation.",
+ response=response, parts=parts
+ )
+ raise error
+ return iter(parts)
+ return parts
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+class TransportWrapper(HttpTransport):
+ """Wrapper class that ensures that an inner client created
+ by a `get_client` method does not close the outer transport for the parent
+ when used in a context manager.
+ """
+ def __init__(self, transport):
+ self._transport = transport
+
+ def send(self, request, **kwargs):
+ return self._transport.send(request, **kwargs)
+
+ def open(self):
+ pass
+
+ def close(self):
+ pass
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, *args): # pylint: disable=arguments-differ
+ pass
+
+
+def _format_shared_key_credential(account_name, credential):
+ if isinstance(credential, six.string_types):
+ if not account_name:
+ raise ValueError("Unable to determine account name for shared key credential.")
+ credential = {"account_name": account_name, "account_key": credential}
+ if isinstance(credential, dict):
+ if "account_name" not in credential:
+ raise ValueError("Shared key credential missing 'account_name")
+ if "account_key" not in credential:
+ raise ValueError("Shared key credential missing 'account_key")
+ return SharedKeyCredentialPolicy(**credential)
+ return credential
+
+
+def parse_connection_str(conn_str, credential, service):
+ conn_str = conn_str.rstrip(";")
+ conn_settings = [s.split("=", 1) for s in conn_str.split(";")]
+ if any(len(tup) != 2 for tup in conn_settings):
+ raise ValueError("Connection string is either blank or malformed.")
+ conn_settings = dict((key.upper(), val) for key, val in conn_settings)
+ endpoints = _SERVICE_PARAMS[service]
+ primary = None
+ secondary = None
+ if not credential:
+ try:
+ credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]}
+ except KeyError:
+ credential = conn_settings.get("SHAREDACCESSSIGNATURE")
+ if endpoints["primary"] in conn_settings:
+ primary = conn_settings[endpoints["primary"]]
+ if endpoints["secondary"] in conn_settings:
+ secondary = conn_settings[endpoints["secondary"]]
+ else:
+ if endpoints["secondary"] in conn_settings:
+ raise ValueError("Connection string specifies only secondary endpoint.")
+ try:
+ primary = "{}://{}.{}.{}".format(
+ conn_settings["DEFAULTENDPOINTSPROTOCOL"],
+ conn_settings["ACCOUNTNAME"],
+ service,
+ conn_settings["ENDPOINTSUFFIX"],
+ )
+ secondary = "{}-secondary.{}.{}".format(
+ conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"]
+ )
+ except KeyError:
+ pass
+
+ if not primary:
+ try:
+ primary = "https://{}.{}.{}".format(
+ conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE)
+ )
+ except KeyError:
+ raise ValueError("Connection string missing required connection details.")
+ return primary, secondary, credential
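+
+# Illustrative trace with assumed values: for a typical connection string such as
+#     "DefaultEndpointsProtocol=https;AccountName=myadls;AccountKey=<key>;EndpointSuffix=core.windows.net"
+# calling parse_connection_str(conn_str, credential=None, service="dfs") would return
+#     primary    -> "https://myadls.dfs.core.windows.net"
+#     secondary  -> "myadls-secondary.dfs.core.windows.net"
+#     credential -> {"account_name": "myadls", "account_key": "<key>"}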
+
+
+def create_configuration(**kwargs):
+ # type: (**Any) -> Configuration
+ config = Configuration(**kwargs)
+ config.headers_policy = StorageHeadersPolicy(**kwargs)
+ config.user_agent_policy = UserAgentPolicy(
+ sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs)
+ config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
+ config.logging_policy = StorageLoggingPolicy(**kwargs)
+ config.proxy_policy = ProxyPolicy(**kwargs)
+
+ # Storage settings
+ config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024)
+ config.copy_polling_interval = 15
+
+ # Block blob uploads
+ config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024)
+ config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1)
+ config.use_byte_buffer = kwargs.get("use_byte_buffer", False)
+
+ # Page blob uploads
+ config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024)
+
+ # Datalake file uploads
+ config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1)
+
+ # Blob downloads
+ config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024)
+ config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024)
+
+ # File uploads
+ config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024)
+ return config
+
+
+def parse_query(query_str):
+ sas_values = QueryStringConstants.to_list()
+ parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()}
+ sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values]
+ sas_token = None
+ if sas_params:
+ sas_token = "&".join(sas_params)
+
+ snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot")
+ return snapshot, sas_token
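+
+# Illustrative trace with assumed values: given a query string such as
+#     "snapshot=2020-06-12T00:00:00Z&sv=2020-06-12&sig=abc%3D"
+# parse_query would return ("2020-06-12T00:00:00Z", "sv=2020-06-12&sig=abc%3D"),
+# assuming 'sv' and 'sig' appear in QueryStringConstants.to_list(); 'snapshot' is not a
+# SAS parameter and is therefore excluded from the reconstructed token.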
+
+
+def is_credential_sastoken(credential):
+ if not credential or not isinstance(credential, six.string_types):
+ return False
+
+ sas_values = QueryStringConstants.to_list()
+ parsed_query = parse_qs(credential.lstrip("?"))
+ if parsed_query and all([k in sas_values for k in parsed_query.keys()]):
+ return True
+ return False
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/base_client_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/base_client_async.py
new file mode 100644
index 00000000000..091c350b489
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/base_client_async.py
@@ -0,0 +1,183 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+ TYPE_CHECKING
+)
+import logging
+
+from azure.core.credentials import AzureSasCredential
+from azure.core.pipeline import AsyncPipeline
+from azure.core.async_paging import AsyncList
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline.policies import (
+ ContentDecodePolicy,
+ AsyncBearerTokenCredentialPolicy,
+ AsyncRedirectPolicy,
+ DistributedTracingPolicy,
+ HttpLoggingPolicy,
+ AzureSasCredentialPolicy,
+)
+from azure.core.pipeline.transport import AsyncHttpTransport
+
+from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT
+from .authentication import SharedKeyCredentialPolicy
+from .base_client import create_configuration
+from .policies import (
+ StorageContentValidation,
+ StorageRequestHook,
+ StorageHosts,
+ StorageHeadersPolicy,
+ QueueMessagePolicy
+)
+from .policies_async import AsyncStorageResponseHook
+
+from .response_handlers import process_storage_error, PartialBatchErrorException
+
+if TYPE_CHECKING:
+ from azure.core.pipeline import Pipeline
+ from azure.core.pipeline.transport import HttpRequest
+ from azure.core.configuration import Configuration
+_LOGGER = logging.getLogger(__name__)
+
+
+class AsyncStorageAccountHostsMixin(object):
+
+ def __enter__(self):
+ raise TypeError("Async client only supports 'async with'.")
+
+ def __exit__(self, *args):
+ pass
+
+ async def __aenter__(self):
+ await self._client.__aenter__()
+ return self
+
+ async def __aexit__(self, *args):
+ await self._client.__aexit__(*args)
+
+ async def close(self):
+ """ This method is to close the sockets opened by the client.
+ It need not be used when using with a context manager.
+ """
+ await self._client.close()
+
+ def _create_pipeline(self, credential, **kwargs):
+ # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+ self._credential_policy = None
+ if hasattr(credential, 'get_token'):
+ self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+ elif isinstance(credential, SharedKeyCredentialPolicy):
+ self._credential_policy = credential
+ elif isinstance(credential, AzureSasCredential):
+ self._credential_policy = AzureSasCredentialPolicy(credential)
+ elif credential is not None:
+ raise TypeError("Unsupported credential: {}".format(credential))
+ config = kwargs.get('_configuration') or create_configuration(**kwargs)
+ if kwargs.get('_pipeline'):
+ return config, kwargs['_pipeline']
+ config.transport = kwargs.get('transport') # type: ignore
+ kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+ kwargs.setdefault("read_timeout", READ_TIMEOUT)
+ if not config.transport:
+ try:
+ from azure.core.pipeline.transport import AioHttpTransport
+ except ImportError:
+ raise ImportError("Unable to create async transport. Please check aiohttp is installed.")
+ config.transport = AioHttpTransport(**kwargs)
+ policies = [
+ QueueMessagePolicy(),
+ config.headers_policy,
+ config.proxy_policy,
+ config.user_agent_policy,
+ StorageContentValidation(),
+ StorageRequestHook(**kwargs),
+ self._credential_policy,
+ ContentDecodePolicy(response_encoding="utf-8"),
+ AsyncRedirectPolicy(**kwargs),
+ StorageHosts(hosts=self._hosts, **kwargs), # type: ignore
+ config.retry_policy,
+ config.logging_policy,
+ AsyncStorageResponseHook(**kwargs),
+ DistributedTracingPolicy(**kwargs),
+ HttpLoggingPolicy(**kwargs),
+ ]
+ if kwargs.get("_additional_pipeline_policies"):
+ policies = policies + kwargs.get("_additional_pipeline_policies")
+ return config, AsyncPipeline(config.transport, policies=policies)
+
+ async def _batch_send(
+ self, *reqs: 'HttpRequest',
+ **kwargs
+ ):
+ """Given a series of request, do a Storage batch call.
+ """
+ # Pop it here, so requests doesn't feel bad about additional kwarg
+ raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+ request = self._client._client.post( # pylint: disable=protected-access
+ url='https://{}/?comp=batch'.format(self.primary_hostname),
+ headers={
+ 'x-ms-version': self.api_version
+ }
+ )
+
+ request.set_multipart_mixed(
+ *reqs,
+ policies=[
+ StorageHeadersPolicy(),
+ self._credential_policy
+ ],
+ enforce_https=False
+ )
+
+ pipeline_response = await self._pipeline.run(
+ request, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ try:
+ if response.status_code not in [202]:
+ raise HttpResponseError(response=response)
+ parts = response.parts() # Return an AsyncIterator
+ if raise_on_any_failure:
+ parts_list = []
+ async for part in parts:
+ parts_list.append(part)
+ if any(p for p in parts_list if not 200 <= p.status_code < 300):
+ error = PartialBatchErrorException(
+ message="There is a partial failure in the batch operation.",
+ response=response, parts=parts_list
+ )
+ raise error
+ return AsyncList(parts_list)
+ return parts
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+
+class AsyncTransportWrapper(AsyncHttpTransport):
+ """Wrapper class that ensures that an inner client created
+ by a `get_client` method does not close the outer transport for the parent
+ when used in a context manager.
+ """
+ def __init__(self, async_transport):
+ self._transport = async_transport
+
+ async def send(self, request, **kwargs):
+ return await self._transport.send(request, **kwargs)
+
+ async def open(self):
+ pass
+
+ async def close(self):
+ pass
+
+ async def __aenter__(self):
+ pass
+
+ async def __aexit__(self, *args): # pylint: disable=arguments-differ
+ pass
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/constants.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/constants.py
new file mode 100644
index 00000000000..a50e8b5a6a7
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/constants.py
@@ -0,0 +1,27 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import sys
+
+from .._generated import AzureDataLakeStorageRESTAPI
+
+
+X_MS_VERSION = AzureDataLakeStorageRESTAPI(url="get_api_version")._config.version # pylint: disable=protected-access
+
+# Socket timeout in seconds
+CONNECTION_TIMEOUT = 20
+READ_TIMEOUT = 20
+
+# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned)
+# The socket timeout is now the maximum total duration to send all data.
+if sys.version_info >= (3, 5):
+ # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds
+ # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed)
+ READ_TIMEOUT = 2000
+
+STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default"
+
+SERVICE_HOST_BASE = 'core.windows.net'
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/encryption.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/encryption.py
new file mode 100644
index 00000000000..62607cc0cf8
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/encryption.py
@@ -0,0 +1,542 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import os
+from os import urandom
+from json import (
+ dumps,
+ loads,
+)
+from collections import OrderedDict
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.ciphers import Cipher
+from cryptography.hazmat.primitives.ciphers.algorithms import AES
+from cryptography.hazmat.primitives.ciphers.modes import CBC
+from cryptography.hazmat.primitives.padding import PKCS7
+
+from azure.core.exceptions import HttpResponseError
+
+from .._version import VERSION
+from . import encode_base64, decode_base64_to_bytes
+
+
+_ENCRYPTION_PROTOCOL_V1 = '1.0'
+_ERROR_OBJECT_INVALID = \
+ '{0} does not define a complete interface. Value of {1} is either missing or invalid.'
+
+
+def _validate_not_none(param_name, param):
+ if param is None:
+ raise ValueError('{0} should not be None.'.format(param_name))
+
+
+def _validate_key_encryption_key_wrap(kek):
+ # Note that None is not callable and so will fail the second clause of each check.
+ if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
+ raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
+ if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
+ raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
+ if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
+ raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))
+
+
+class _EncryptionAlgorithm(object):
+ '''
+ Specifies which client encryption algorithm is used.
+ '''
+ AES_CBC_256 = 'AES_CBC_256'
+
+
+class _WrappedContentKey:
+ '''
+ Represents the envelope key details stored on the service.
+ '''
+
+ def __init__(self, algorithm, encrypted_key, key_id):
+ '''
+ :param str algorithm:
+ The algorithm used for wrapping.
+ :param bytes encrypted_key:
+ The encrypted content-encryption-key.
+ :param str key_id:
+ The key-encryption-key identifier string.
+ '''
+
+ _validate_not_none('algorithm', algorithm)
+ _validate_not_none('encrypted_key', encrypted_key)
+ _validate_not_none('key_id', key_id)
+
+ self.algorithm = algorithm
+ self.encrypted_key = encrypted_key
+ self.key_id = key_id
+
+
+class _EncryptionAgent:
+ '''
+ Represents the encryption agent stored on the service.
+ It consists of the encryption protocol version and encryption algorithm used.
+ '''
+
+ def __init__(self, encryption_algorithm, protocol):
+ '''
+ :param _EncryptionAlgorithm encryption_algorithm:
+ The algorithm used for encrypting the message contents.
+ :param str protocol:
+ The protocol version used for encryption.
+ '''
+
+ _validate_not_none('encryption_algorithm', encryption_algorithm)
+ _validate_not_none('protocol', protocol)
+
+ self.encryption_algorithm = str(encryption_algorithm)
+ self.protocol = protocol
+
+
+class _EncryptionData:
+ '''
+ Represents the encryption data that is stored on the service.
+ '''
+
+ def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key,
+ key_wrapping_metadata):
+ '''
+ :param bytes content_encryption_IV:
+ The content encryption initialization vector.
+ :param _EncryptionAgent encryption_agent:
+ The encryption agent.
+ :param _WrappedContentKey wrapped_content_key:
+ An object that stores the wrapping algorithm, the key identifier,
+ and the encrypted key bytes.
+ :param dict key_wrapping_metadata:
+ A dict containing metadata related to the key wrapping.
+ '''
+
+ _validate_not_none('content_encryption_IV', content_encryption_IV)
+ _validate_not_none('encryption_agent', encryption_agent)
+ _validate_not_none('wrapped_content_key', wrapped_content_key)
+
+ self.content_encryption_IV = content_encryption_IV
+ self.encryption_agent = encryption_agent
+ self.wrapped_content_key = wrapped_content_key
+ self.key_wrapping_metadata = key_wrapping_metadata
+
+
+def _generate_encryption_data_dict(kek, cek, iv):
+ '''
+ Generates and returns the encryption metadata as a dict.
+
+ :param object kek: The key encryption key. See calling functions for more information.
+ :param bytes cek: The content encryption key.
+ :param bytes iv: The initialization vector.
+ :return: A dict containing all the encryption metadata.
+ :rtype: dict
+ '''
+ # Encrypt the cek.
+ wrapped_cek = kek.wrap_key(cek)
+
+ # Build the encryption_data dict.
+ # Use OrderedDict to comply with Java's ordering requirement.
+ wrapped_content_key = OrderedDict()
+ wrapped_content_key['KeyId'] = kek.get_kid()
+ wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek)
+ wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm()
+
+ encryption_agent = OrderedDict()
+ encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1
+ encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256
+
+ encryption_data_dict = OrderedDict()
+ encryption_data_dict['WrappedContentKey'] = wrapped_content_key
+ encryption_data_dict['EncryptionAgent'] = encryption_agent
+ encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv)
+ encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION}
+
+ return encryption_data_dict
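+
+# For reference, the metadata produced above (later serialized to JSON) has the shape:
+#     {
+#         "WrappedContentKey": {"KeyId": "<kek id>", "EncryptedKey": "<base64>", "Algorithm": "<wrap alg>"},
+#         "EncryptionAgent": {"Protocol": "1.0", "EncryptionAlgorithm": "AES_CBC_256"},
+#         "ContentEncryptionIV": "<base64>",
+#         "KeyWrappingMetadata": {"EncryptionLibrary": "Python <version>"}
+#     }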
+
+
+def _dict_to_encryption_data(encryption_data_dict):
+ '''
+ Converts the specified dictionary to an EncryptionData object for
+ eventual use in decryption.
+
+ :param dict encryption_data_dict:
+ The dictionary containing the encryption data.
+ :return: an _EncryptionData object built from the dictionary.
+ :rtype: _EncryptionData
+ '''
+ try:
+ if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1:
+ raise ValueError("Unsupported encryption version.")
+ except KeyError:
+ raise ValueError("Unsupported encryption version.")
+ wrapped_content_key = encryption_data_dict['WrappedContentKey']
+ wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
+ decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
+ wrapped_content_key['KeyId'])
+
+ encryption_agent = encryption_data_dict['EncryptionAgent']
+ encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
+ encryption_agent['Protocol'])
+
+ if 'KeyWrappingMetadata' in encryption_data_dict:
+ key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
+ else:
+ key_wrapping_metadata = None
+
+ encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
+ encryption_agent,
+ wrapped_content_key,
+ key_wrapping_metadata)
+
+ return encryption_data
+
+
+def _generate_AES_CBC_cipher(cek, iv):
+ '''
+ Generates and returns an encryption cipher for AES CBC using the given cek and iv.
+
+ :param bytes[] cek: The content encryption key for the cipher.
+ :param bytes[] iv: The initialization vector for the cipher.
+ :return: A cipher for encrypting in AES256 CBC.
+ :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
+ '''
+
+ backend = default_backend()
+ algorithm = AES(cek)
+ mode = CBC(iv)
+ return Cipher(algorithm, mode, backend)
+
+
+def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None):
+ '''
+ Extracts and returns the content_encryption_key stored in the encryption_data object
+ and performs necessary validation on all parameters.
+ :param _EncryptionData encryption_data:
+ The encryption metadata of the retrieved value.
+ :param obj key_encryption_key:
+ The key_encryption_key used to unwrap the cek. Please refer to high-level service object
+ instance variables for more details.
+ :param func key_resolver:
+        A function that, given a key_id, will return a key_encryption_key. Please refer
+ to high-level service object instance variables for more details.
+ :return: the content_encryption_key stored in the encryption_data object.
+ :rtype: bytes[]
+ '''
+
+ _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
+ _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
+
+ if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol:
+ raise ValueError('Encryption version is not supported.')
+
+ content_encryption_key = None
+
+ # If the resolver exists, give priority to the key it finds.
+ if key_resolver is not None:
+ key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
+
+ _validate_not_none('key_encryption_key', key_encryption_key)
+ if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid):
+ raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
+ if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key):
+ raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
+ if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid():
+ raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.')
+ # Will throw an exception if the specified algorithm is not supported.
+ content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
+ encryption_data.wrapped_content_key.algorithm)
+ _validate_not_none('content_encryption_key', content_encryption_key)
+
+ return content_encryption_key
+
+
+def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
+ '''
+ Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
+ Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
+    Returns the original plaintext.
+
+ :param str message:
+ The ciphertext to be decrypted.
+ :param _EncryptionData encryption_data:
+ The metadata associated with this ciphertext.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ unwrap_key(key, algorithm)
+ - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+ get_kid()
+ - returns a string key id for this key-encryption-key.
+ :param function resolver(kid):
+ The user-provided key resolver. Uses the kid string to return a key-encryption-key
+ implementing the interface defined above.
+ :return: The decrypted plaintext.
+ :rtype: str
+ '''
+ _validate_not_none('message', message)
+ content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
+
+ if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm:
+ raise ValueError('Specified encryption algorithm is not supported.')
+
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
+
+ # decrypt data
+ decrypted_data = message
+ decryptor = cipher.decryptor()
+ decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
+
+ # unpad data
+ unpadder = PKCS7(128).unpadder()
+ decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
+
+ return decrypted_data
+
+
+def encrypt_blob(blob, key_encryption_key):
+ '''
+ Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
+ Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+ Returns a json-formatted string containing the encryption metadata. This method should
+ only be used when a blob is small enough for single shot upload. Encrypting larger blobs
+ is done as a part of the upload_data_chunks method.
+
+ :param bytes blob:
+ The blob to be encrypted.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+ get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+ get_kid()--returns a string key id for this key-encryption-key.
+ :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
+ :rtype: (str, bytes)
+ '''
+
+ _validate_not_none('blob', blob)
+ _validate_not_none('key_encryption_key', key_encryption_key)
+ _validate_key_encryption_key_wrap(key_encryption_key)
+
+ # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+ content_encryption_key = urandom(32)
+ initialization_vector = urandom(16)
+
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+ # PKCS7 with 16 byte blocks ensures compatibility with AES.
+ padder = PKCS7(128).padder()
+ padded_data = padder.update(blob) + padder.finalize()
+
+ # Encrypt the data.
+ encryptor = cipher.encryptor()
+ encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+ encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
+ initialization_vector)
+ encryption_data['EncryptionMode'] = 'FullBlob'
+
+ return dumps(encryption_data), encrypted_data
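+
+# A minimal key-encryption-key sketch satisfying the interface required above (illustrative
+# only; a real KEK would wrap the CEK with RSA-OAEP, Key Vault, etc. instead of returning it
+# unchanged):
+#
+#     class IdentityKEK:
+#         def wrap_key(self, key):
+#             return key  # no-op "wrapping", for illustration only
+#         def unwrap_key(self, key, algorithm):
+#             return key
+#         def get_key_wrap_algorithm(self):
+#             return 'none'
+#         def get_kid(self):
+#             return 'local:identity-kek'
+#
+#     metadata_json, ciphertext = encrypt_blob(b'hello world', IdentityKEK())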
+
+
+def generate_blob_encryption_data(key_encryption_key):
+ '''
+ Generates the encryption_metadata for the blob.
+
+ :param bytes key_encryption_key:
+ The key-encryption-key used to wrap the cek associate with this blob.
+ :return: A tuple containing the cek and iv for this blob as well as the
+ serialized encryption metadata for the blob.
+ :rtype: (bytes, bytes, str)
+ '''
+ encryption_data = None
+ content_encryption_key = None
+ initialization_vector = None
+ if key_encryption_key:
+ _validate_key_encryption_key_wrap(key_encryption_key)
+ content_encryption_key = urandom(32)
+ initialization_vector = urandom(16)
+ encryption_data = _generate_encryption_data_dict(key_encryption_key,
+ content_encryption_key,
+ initialization_vector)
+ encryption_data['EncryptionMode'] = 'FullBlob'
+ encryption_data = dumps(encryption_data)
+
+ return content_encryption_key, initialization_vector, encryption_data
+
+
+def decrypt_blob(require_encryption, key_encryption_key, key_resolver,
+ content, start_offset, end_offset, response_headers):
+ '''
+ Decrypts the given blob contents and returns only the requested range.
+
+ :param bool require_encryption:
+ Whether or not the calling blob service requires objects to be decrypted.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+ get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+ get_kid()--returns a string key id for this key-encryption-key.
+ :param key_resolver(kid):
+ The user-provided key resolver. Uses the kid string to return a key-encryption-key
+ implementing the interface defined above.
+ :return: The decrypted blob content.
+ :rtype: bytes
+ '''
+ try:
+ encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
+ except: # pylint: disable=bare-except
+ if require_encryption:
+ raise ValueError(
+                'Encryption required, but received data does not contain appropriate metadata.' + \
+ 'Data was either not encrypted or metadata has been lost.')
+
+ return content
+
+ if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
+ raise ValueError('Specified encryption algorithm is not supported.')
+
+ blob_type = response_headers['x-ms-blob-type']
+
+ iv = None
+ unpad = False
+ if 'content-range' in response_headers:
+ content_range = response_headers['content-range']
+ # Format: 'bytes x-y/size'
+
+ # Ignore the word 'bytes'
+ content_range = content_range.split(' ')
+
+ content_range = content_range[1].split('-')
+ content_range = content_range[1].split('/')
+ end_range = int(content_range[0])
+ blob_size = int(content_range[1])
+
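+        # CBC detail: each ciphertext block acts as the IV for the block that follows it, so when
+        # the requested range does not start at the beginning of the blob the caller is expected
+        # to have fetched 16 extra leading bytes; that preceding block is consumed here as the IV.
+        # Otherwise the IV recorded in the encryption metadata is used.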
+ if start_offset >= 16:
+ iv = content[:16]
+ content = content[16:]
+ start_offset -= 16
+ else:
+ iv = encryption_data.content_encryption_IV
+
+ if end_range == blob_size - 1:
+ unpad = True
+ else:
+ unpad = True
+ iv = encryption_data.content_encryption_IV
+
+ if blob_type == 'PageBlob':
+ unpad = False
+
+ content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
+ decryptor = cipher.decryptor()
+
+ content = decryptor.update(content) + decryptor.finalize()
+ if unpad:
+ unpadder = PKCS7(128).unpadder()
+ content = unpadder.update(content) + unpadder.finalize()
+
+ return content[start_offset: len(content) - end_offset]
+
+
+def get_blob_encryptor_and_padder(cek, iv, should_pad):
+ encryptor = None
+ padder = None
+
+ if cek is not None and iv is not None:
+ cipher = _generate_AES_CBC_cipher(cek, iv)
+ encryptor = cipher.encryptor()
+ padder = PKCS7(128).padder() if should_pad else None
+
+ return encryptor, padder
+
+
+def encrypt_queue_message(message, key_encryption_key):
+ '''
+ Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
+ Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+ Returns a json-formatted string containing the encrypted message and the encryption metadata.
+
+ :param object message:
+        The plain text message to be encrypted.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+ get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+ get_kid()--returns a string key id for this key-encryption-key.
+ :return: A json-formatted string containing the encrypted message and the encryption metadata.
+ :rtype: str
+ '''
+
+ _validate_not_none('message', message)
+ _validate_not_none('key_encryption_key', key_encryption_key)
+ _validate_key_encryption_key_wrap(key_encryption_key)
+
+ # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+ content_encryption_key = os.urandom(32)
+ initialization_vector = os.urandom(16)
+
+ # Queue encoding functions all return unicode strings, and encryption should
+ # operate on binary strings.
+ message = message.encode('utf-8')
+
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+ # PKCS7 with 16 byte blocks ensures compatibility with AES.
+ padder = PKCS7(128).padder()
+ padded_data = padder.update(message) + padder.finalize()
+
+ # Encrypt the data.
+ encryptor = cipher.encryptor()
+ encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+
+ # Build the dictionary structure.
+ queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
+ 'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
+ content_encryption_key,
+ initialization_vector)}
+
+ return dumps(queue_message)
+
+
+def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
+ '''
+ Returns the decrypted message contents from an EncryptedQueueMessage.
+ If no encryption metadata is present, will return the unaltered message.
+ :param str message:
+ The JSON formatted QueueEncryptedMessage contents with all associated metadata.
+ :param bool require_encryption:
+ If set, will enforce that the retrieved messages are encrypted and decrypt them.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ unwrap_key(key, algorithm)
+            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+ get_kid()
+ - returns a string key id for this key-encryption-key.
+ :param function resolver(kid):
+ The user-provided key resolver. Uses the kid string to return a key-encryption-key
+ implementing the interface defined above.
+ :return: The plain text message from the queue message.
+ :rtype: str
+ '''
+
+ try:
+ message = loads(message)
+
+ encryption_data = _dict_to_encryption_data(message['EncryptionData'])
+ decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
+ except (KeyError, ValueError):
+ # Message was not json formatted and so was not encrypted
+ # or the user provided a json formatted message.
+ if require_encryption:
+ raise ValueError('Message was not encrypted.')
+
+ return message
+ try:
+ return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
+ except Exception as error:
+ raise HttpResponseError(
+ message="Decryption failed.",
+ response=response,
+ error=error)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/models.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/models.py
new file mode 100644
index 00000000000..d911f915901
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/models.py
@@ -0,0 +1,468 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-instance-attributes
+
+from enum import Enum
+
+
+def get_enum_value(value):
+ if value is None or value in ["None", ""]:
+ return None
+ try:
+ return value.value
+ except AttributeError:
+ return value
+
+
+class StorageErrorCode(str, Enum):
+
+ # Generic storage values
+ account_already_exists = "AccountAlreadyExists"
+ account_being_created = "AccountBeingCreated"
+ account_is_disabled = "AccountIsDisabled"
+ authentication_failed = "AuthenticationFailed"
+ authorization_failure = "AuthorizationFailure"
+ no_authentication_information = "NoAuthenticationInformation"
+ condition_headers_not_supported = "ConditionHeadersNotSupported"
+ condition_not_met = "ConditionNotMet"
+ empty_metadata_key = "EmptyMetadataKey"
+ insufficient_account_permissions = "InsufficientAccountPermissions"
+ internal_error = "InternalError"
+ invalid_authentication_info = "InvalidAuthenticationInfo"
+ invalid_header_value = "InvalidHeaderValue"
+ invalid_http_verb = "InvalidHttpVerb"
+ invalid_input = "InvalidInput"
+ invalid_md5 = "InvalidMd5"
+ invalid_metadata = "InvalidMetadata"
+ invalid_query_parameter_value = "InvalidQueryParameterValue"
+ invalid_range = "InvalidRange"
+ invalid_resource_name = "InvalidResourceName"
+ invalid_uri = "InvalidUri"
+ invalid_xml_document = "InvalidXmlDocument"
+ invalid_xml_node_value = "InvalidXmlNodeValue"
+ md5_mismatch = "Md5Mismatch"
+ metadata_too_large = "MetadataTooLarge"
+ missing_content_length_header = "MissingContentLengthHeader"
+ missing_required_query_parameter = "MissingRequiredQueryParameter"
+ missing_required_header = "MissingRequiredHeader"
+ missing_required_xml_node = "MissingRequiredXmlNode"
+ multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported"
+ operation_timed_out = "OperationTimedOut"
+ out_of_range_input = "OutOfRangeInput"
+ out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue"
+ request_body_too_large = "RequestBodyTooLarge"
+ resource_type_mismatch = "ResourceTypeMismatch"
+ request_url_failed_to_parse = "RequestUrlFailedToParse"
+ resource_already_exists = "ResourceAlreadyExists"
+ resource_not_found = "ResourceNotFound"
+ server_busy = "ServerBusy"
+ unsupported_header = "UnsupportedHeader"
+ unsupported_xml_node = "UnsupportedXmlNode"
+ unsupported_query_parameter = "UnsupportedQueryParameter"
+ unsupported_http_verb = "UnsupportedHttpVerb"
+
+ # Blob values
+ append_position_condition_not_met = "AppendPositionConditionNotMet"
+ blob_already_exists = "BlobAlreadyExists"
+ blob_not_found = "BlobNotFound"
+ blob_overwritten = "BlobOverwritten"
+ blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength"
+ block_count_exceeds_limit = "BlockCountExceedsLimit"
+ block_list_too_long = "BlockListTooLong"
+ cannot_change_to_lower_tier = "CannotChangeToLowerTier"
+ cannot_verify_copy_source = "CannotVerifyCopySource"
+ container_already_exists = "ContainerAlreadyExists"
+ container_being_deleted = "ContainerBeingDeleted"
+ container_disabled = "ContainerDisabled"
+ container_not_found = "ContainerNotFound"
+ content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit"
+ copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported"
+ copy_id_mismatch = "CopyIdMismatch"
+ feature_version_mismatch = "FeatureVersionMismatch"
+ incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch"
+ incremental_copy_of_eralier_version_snapshot_not_allowed = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
+ incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot"
+ infinite_lease_duration_required = "InfiniteLeaseDurationRequired"
+ invalid_blob_or_block = "InvalidBlobOrBlock"
+ invalid_blob_tier = "InvalidBlobTier"
+ invalid_blob_type = "InvalidBlobType"
+ invalid_block_id = "InvalidBlockId"
+ invalid_block_list = "InvalidBlockList"
+ invalid_operation = "InvalidOperation"
+ invalid_page_range = "InvalidPageRange"
+ invalid_source_blob_type = "InvalidSourceBlobType"
+ invalid_source_blob_url = "InvalidSourceBlobUrl"
+ invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation"
+ lease_already_present = "LeaseAlreadyPresent"
+ lease_already_broken = "LeaseAlreadyBroken"
+ lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation"
+ lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation"
+ lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation"
+ lease_id_missing = "LeaseIdMissing"
+ lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired"
+ lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged"
+ lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed"
+ lease_lost = "LeaseLost"
+ lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation"
+ lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation"
+ lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation"
+ max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet"
+ no_pending_copy_operation = "NoPendingCopyOperation"
+ operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob"
+ pending_copy_operation = "PendingCopyOperation"
+ previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer"
+ previous_snapshot_not_found = "PreviousSnapshotNotFound"
+ previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported"
+ sequence_number_condition_not_met = "SequenceNumberConditionNotMet"
+ sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge"
+ snapshot_count_exceeded = "SnapshotCountExceeded"
+ snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded"
+ snapshots_present = "SnapshotsPresent"
+ source_condition_not_met = "SourceConditionNotMet"
+ system_in_use = "SystemInUse"
+ target_condition_not_met = "TargetConditionNotMet"
+ unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite"
+ blob_being_rehydrated = "BlobBeingRehydrated"
+ blob_archived = "BlobArchived"
+ blob_not_archived = "BlobNotArchived"
+
+ # Queue values
+ invalid_marker = "InvalidMarker"
+ message_not_found = "MessageNotFound"
+ message_too_large = "MessageTooLarge"
+ pop_receipt_mismatch = "PopReceiptMismatch"
+ queue_already_exists = "QueueAlreadyExists"
+ queue_being_deleted = "QueueBeingDeleted"
+ queue_disabled = "QueueDisabled"
+ queue_not_empty = "QueueNotEmpty"
+ queue_not_found = "QueueNotFound"
+
+ # File values
+ cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory"
+ client_cache_flush_delay = "ClientCacheFlushDelay"
+ delete_pending = "DeletePending"
+ directory_not_empty = "DirectoryNotEmpty"
+ file_lock_conflict = "FileLockConflict"
+ invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName"
+ parent_not_found = "ParentNotFound"
+ read_only_attribute = "ReadOnlyAttribute"
+ share_already_exists = "ShareAlreadyExists"
+ share_being_deleted = "ShareBeingDeleted"
+ share_disabled = "ShareDisabled"
+ share_not_found = "ShareNotFound"
+ sharing_violation = "SharingViolation"
+ share_snapshot_in_progress = "ShareSnapshotInProgress"
+ share_snapshot_count_exceeded = "ShareSnapshotCountExceeded"
+ share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported"
+ share_has_snapshots = "ShareHasSnapshots"
+ container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed"
+
+ # DataLake values
+ content_length_must_be_zero = 'ContentLengthMustBeZero'
+ path_already_exists = 'PathAlreadyExists'
+ invalid_flush_position = 'InvalidFlushPosition'
+ invalid_property_name = 'InvalidPropertyName'
+ invalid_source_uri = 'InvalidSourceUri'
+ unsupported_rest_version = 'UnsupportedRestVersion'
+ file_system_not_found = 'FilesystemNotFound'
+ path_not_found = 'PathNotFound'
+ rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound'
+ source_path_not_found = 'SourcePathNotFound'
+ destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted'
+ file_system_already_exists = 'FilesystemAlreadyExists'
+ file_system_being_deleted = 'FilesystemBeingDeleted'
+ invalid_destination_path = 'InvalidDestinationPath'
+ invalid_rename_source_path = 'InvalidRenameSourcePath'
+ invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType'
+ lease_is_already_broken = 'LeaseIsAlreadyBroken'
+ lease_name_mismatch = 'LeaseNameMismatch'
+ path_conflict = 'PathConflict'
+ source_path_is_being_deleted = 'SourcePathIsBeingDeleted'
+
+
+class DictMixin(object):
+
+ def __setitem__(self, key, item):
+ self.__dict__[key] = item
+
+ def __getitem__(self, key):
+ return self.__dict__[key]
+
+ def __repr__(self):
+ return str(self)
+
+ def __len__(self):
+ return len(self.keys())
+
+ def __delitem__(self, key):
+ self.__dict__[key] = None
+
+ def __eq__(self, other):
+ """Compare objects by comparing all attributes."""
+ if isinstance(other, self.__class__):
+ return self.__dict__ == other.__dict__
+ return False
+
+ def __ne__(self, other):
+ """Compare objects by comparing all attributes."""
+ return not self.__eq__(other)
+
+ def __str__(self):
+ return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')})
+
+ def has_key(self, k):
+ return k in self.__dict__
+
+ def update(self, *args, **kwargs):
+ return self.__dict__.update(*args, **kwargs)
+
+ def keys(self):
+ return [k for k in self.__dict__ if not k.startswith('_')]
+
+ def values(self):
+ return [v for k, v in self.__dict__.items() if not k.startswith('_')]
+
+ def items(self):
+ return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')]
+
+ def get(self, key, default=None):
+ if key in self.__dict__:
+ return self.__dict__[key]
+ return default
+
+
+class LocationMode(object):
+ """
+ Specifies the location the request should be sent to. This mode only applies
+ for RA-GRS accounts which allow secondary read access. All other account types
+ must use PRIMARY.
+ """
+
+ PRIMARY = 'primary' #: Requests should be sent to the primary location.
+ SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible.
+
+
+class ResourceTypes(object):
+ """
+ Specifies the resource types that are accessible with the account SAS.
+
+ :param bool service:
+ Access to service-level APIs (e.g., Get/Set Service Properties,
+ Get Service Stats, List Containers/Queues/Shares)
+ :param bool container:
+ Access to container-level APIs (e.g., Create/Delete Container,
+ Create/Delete Queue, Create/Delete Share,
+ List Blobs/Files and Directories)
+ :param bool object:
+ Access to object-level APIs for blobs, queue messages, and
+        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
+ """
+
+ def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin
+ self.service = service
+ self.container = container
+ self.object = object
+ self._str = (('s' if self.service else '') +
+ ('c' if self.container else '') +
+ ('o' if self.object else ''))
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, string):
+ """Create a ResourceTypes from a string.
+
+ To specify service, container, or object you need only to
+        include the first letter of the word in the string. E.g. for service and container,
+        you would provide a string "sc".
+
+        :param str string: Specify service, container, or object in
+            the string with the first letter of the word.
+ :return: A ResourceTypes object
+ :rtype: ~...blob.ResourceTypes
+ """
+ res_service = 's' in string
+ res_container = 'c' in string
+ res_object = 'o' in string
+
+ parsed = cls(res_service, res_container, res_object)
+ parsed._str = string # pylint: disable = protected-access
+ return parsed
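+
+
+# Illustrative sketch (not part of the upstream SDK): round-tripping a ResourceTypes
+# value through from_string, as described in the docstring above.
+def _example_resource_types_usage():
+    sas_resources = ResourceTypes.from_string('sc')   # service + container access
+    assert sas_resources.service and sas_resources.container
+    assert not sas_resources.object
+    assert str(sas_resources) == 'sc'                  # the parsed string is preserved
+    return sas_resources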
+
+
+class AccountSasPermissions(object):
+ """
+    :class:`~AccountSasPermissions` class to be used with the generate_account_sas
+    function and for the AccessPolicies used with set_*_acl. There are two types of
+    SAS which may be used to grant resource access. One is to grant access to a
+    specific resource (resource-specific). Another is to grant access to the
+    entire service for a specific account and allow certain operations based on
+    the permissions found here.
+
+ :param bool read:
+ Valid for all signed resources types (Service, Container, and Object).
+ Permits read permissions to the specified resource type.
+ :param bool write:
+ Valid for all signed resources types (Service, Container, and Object).
+ Permits write permissions to the specified resource type.
+ :param bool delete:
+ Valid for Container and Object resource types, except for queue messages.
+ :param bool delete_previous_version:
+ Delete the previous blob version for the versioning enabled storage account.
+ :param bool list:
+ Valid for Service and Container resource types only.
+ :param bool add:
+ Valid for the following Object resource types only: queue messages, and append blobs.
+ :param bool create:
+ Valid for the following Object resource types only: blobs and files.
+ Users can create new blobs or files, but may not overwrite existing
+ blobs or files.
+ :param bool update:
+ Valid for the following Object resource types only: queue messages.
+ :param bool process:
+ Valid for the following Object resource type only: queue messages.
+ :keyword bool tag:
+        To enable setting or getting tags on the blobs in the container.
+    :keyword bool filter_by_tags:
+        To enable getting blobs by tags; this should be used together with the list permission.
+ """
+ def __init__(self, read=False, write=False, delete=False,
+ list=False, # pylint: disable=redefined-builtin
+ add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs):
+ self.read = read
+ self.write = write
+ self.delete = delete
+ self.delete_previous_version = delete_previous_version
+ self.list = list
+ self.add = add
+ self.create = create
+ self.update = update
+ self.process = process
+ self.tag = kwargs.pop('tag', False)
+ self.filter_by_tags = kwargs.pop('filter_by_tags', False)
+ self._str = (('r' if self.read else '') +
+ ('w' if self.write else '') +
+ ('d' if self.delete else '') +
+ ('x' if self.delete_previous_version else '') +
+ ('l' if self.list else '') +
+ ('a' if self.add else '') +
+ ('c' if self.create else '') +
+ ('u' if self.update else '') +
+ ('p' if self.process else '') +
+ ('f' if self.filter_by_tags else '') +
+ ('t' if self.tag else '')
+ )
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, permission):
+ """Create AccountSasPermissions from a string.
+
+ To specify read, write, delete, etc. permissions you need only to
+ include the first letter of the word in the string. E.g. for read and write
+ permissions you would provide a string "rw".
+
+ :param str permission: Specify permissions in
+ the string with the first letter of the word.
+ :return: An AccountSasPermissions object
+ :rtype: ~azure.storage.filedatalake.AccountSasPermissions
+ """
+ p_read = 'r' in permission
+ p_write = 'w' in permission
+ p_delete = 'd' in permission
+ p_delete_previous_version = 'x' in permission
+ p_list = 'l' in permission
+ p_add = 'a' in permission
+ p_create = 'c' in permission
+ p_update = 'u' in permission
+ p_process = 'p' in permission
+ p_tag = 't' in permission
+ p_filter_by_tags = 'f' in permission
+ parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version,
+ list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag,
+ filter_by_tags=p_filter_by_tags)
+
+ return parsed
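+
+
+# Illustrative sketch (not part of the upstream SDK): building account SAS permissions
+# either explicitly or from the abbreviated string form described above.
+def _example_account_sas_permissions_usage():
+    explicit = AccountSasPermissions(read=True, write=True, list=True)
+    assert str(explicit) == 'rwl'                      # letters follow the constructor order
+    parsed = AccountSasPermissions.from_string('rwdl')
+    assert parsed.read and parsed.write and parsed.delete and parsed.list
+    assert not parsed.create
+    return explicit, parsed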
+
+
+class Services(object):
+ """Specifies the services accessible with the account SAS.
+
+ :param bool blob:
+ Access for the `~...blob.BlobServiceClient`
+ :param bool queue:
+ Access for the `~azure.storage.queue.QueueServiceClient`
+ :param bool fileshare:
+ Access for the `~azure.storage.fileshare.ShareServiceClient`
+ """
+
+ def __init__(self, blob=False, queue=False, fileshare=False):
+ self.blob = blob
+ self.queue = queue
+ self.fileshare = fileshare
+ self._str = (('b' if self.blob else '') +
+ ('q' if self.queue else '') +
+ ('f' if self.fileshare else ''))
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, string):
+ """Create Services from a string.
+
+ To specify blob, queue, or file you need only to
+ include the first letter of the word in the string. E.g. for blob and queue
+ you would provide a string "bq".
+
+        :param str string: Specify blob, queue, or file in
+            the string with the first letter of the word.
+ :return: A Services object
+ :rtype: ~...blob.Services
+ """
+ res_blob = 'b' in string
+ res_queue = 'q' in string
+ res_file = 'f' in string
+
+ parsed = cls(res_blob, res_queue, res_file)
+ parsed._str = string # pylint: disable = protected-access
+ return parsed
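+
+
+# Illustrative sketch (not part of the upstream SDK): selecting the blob and queue
+# services with the abbreviated string form described above.
+def _example_services_usage():
+    services = Services.from_string('bq')
+    assert services.blob and services.queue and not services.fileshare
+    assert str(services) == 'bq'
+    return services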
+
+
+class UserDelegationKey(object):
+ """
+ Represents a user delegation key, provided to the user by Azure Storage
+ based on their Azure Active Directory access token.
+
+ The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+
+ :ivar str signed_oid:
+ Object ID of this token.
+ :ivar str signed_tid:
+ Tenant ID of the tenant that issued this token.
+ :ivar str signed_start:
+ The datetime this token becomes valid.
+ :ivar str signed_expiry:
+ The datetime this token expires.
+ :ivar str signed_service:
+ What service this key is valid for.
+ :ivar str signed_version:
+ The version identifier of the REST service that created this token.
+ :ivar str value:
+ The user delegation key.
+ """
+ def __init__(self):
+ self.signed_oid = None
+ self.signed_tid = None
+ self.signed_start = None
+ self.signed_expiry = None
+ self.signed_service = None
+ self.signed_version = None
+ self.value = None
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/parser.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/parser.py
new file mode 100644
index 00000000000..c6feba8a639
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/parser.py
@@ -0,0 +1,20 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import sys
+
+if sys.version_info < (3,):
+ def _str(value):
+ if isinstance(value, unicode): # pylint: disable=undefined-variable
+ return value.encode('utf-8')
+
+ return str(value)
+else:
+ _str = str
+
+
+def _to_utc_datetime(value):
+ return value.strftime('%Y-%m-%dT%H:%M:%SZ')
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/policies.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/policies.py
new file mode 100644
index 00000000000..c9bc798d671
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/policies.py
@@ -0,0 +1,610 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import re
+import random
+from time import time
+from io import SEEK_SET, UnsupportedOperation
+import logging
+import uuid
+import types
+from typing import Any, TYPE_CHECKING
+from wsgiref.handlers import format_date_time
+try:
+ from urllib.parse import (
+ urlparse,
+ parse_qsl,
+ urlunparse,
+ urlencode,
+ )
+except ImportError:
+ from urllib import urlencode # type: ignore
+ from urlparse import ( # type: ignore
+ urlparse,
+ parse_qsl,
+ urlunparse,
+ )
+
+from azure.core.pipeline.policies import (
+ HeadersPolicy,
+ SansIOHTTPPolicy,
+ NetworkTraceLoggingPolicy,
+ HTTPPolicy,
+ RequestHistory
+)
+from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
+
+from .models import LocationMode
+
+try:
+ _unicode_type = unicode # type: ignore
+except NameError:
+ _unicode_type = str
+
+if TYPE_CHECKING:
+ from azure.core.pipeline import PipelineRequest, PipelineResponse
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def encode_base64(data):
+ if isinstance(data, _unicode_type):
+ data = data.encode('utf-8')
+ encoded = base64.b64encode(data)
+ return encoded.decode('utf-8')
+
+
+def is_exhausted(settings):
+ """Are we out of retries?"""
+ retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
+ retry_counts = list(filter(None, retry_counts))
+ if not retry_counts:
+ return False
+ return min(retry_counts) < 0
+
+
+def retry_hook(settings, **kwargs):
+ if settings['hook']:
+ settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
+
+
+def is_retry(response, mode):
+ """Is this method/status code retryable? (Based on whitelists and control
+ variables such as the number of total retries to allow, whether to
+ respect the Retry-After header, whether this header is present, and
+ whether the returned status code is on the list of status codes to
+    be retried upon in the presence of the aforementioned header)
+ """
+ status = response.http_response.status_code
+ if 300 <= status < 500:
+        # An exception occurred, but in most cases it was expected. Examples could
+        # include a 409 Conflict or 412 Precondition Failed.
+ if status == 404 and mode == LocationMode.SECONDARY:
+ # Response code 404 should be retried if secondary was used.
+ return True
+ if status == 408:
+ # Response code 408 is a timeout and should be retried.
+ return True
+ return False
+ if status >= 500:
+ # Response codes above 500 with the exception of 501 Not Implemented and
+ # 505 Version Not Supported indicate a server issue and should be retried.
+ if status in [501, 505]:
+ return False
+ return True
+ return False
+
+
+def urljoin(base_url, stub_url):
+ parsed = urlparse(base_url)
+ parsed = parsed._replace(path=parsed.path + '/' + stub_url)
+ return parsed.geturl()
+
+
+class QueueMessagePolicy(SansIOHTTPPolicy):
+
+ def on_request(self, request):
+ message_id = request.context.options.pop('queue_message_id', None)
+ if message_id:
+ request.http_request.url = urljoin(
+ request.http_request.url,
+ message_id)
+
+
+class StorageHeadersPolicy(HeadersPolicy):
+ request_id_header_name = 'x-ms-client-request-id'
+
+ def on_request(self, request):
+ # type: (PipelineRequest, Any) -> None
+ super(StorageHeadersPolicy, self).on_request(request)
+ current_time = format_date_time(time())
+ request.http_request.headers['x-ms-date'] = current_time
+
+ custom_id = request.context.options.pop('client_request_id', None)
+ request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1())
+
+ # def on_response(self, request, response):
+ # # raise exception if the echoed client request id from the service is not identical to the one we sent
+ # if self.request_id_header_name in response.http_response.headers:
+
+ # client_request_id = request.http_request.headers.get(self.request_id_header_name)
+
+ # if response.http_response.headers[self.request_id_header_name] != client_request_id:
+ # raise AzureError(
+ # "Echoed client request ID: {} does not match sent client request ID: {}. "
+ # "Service request ID: {}".format(
+ # response.http_response.headers[self.request_id_header_name], client_request_id,
+ # response.http_response.headers['x-ms-request-id']),
+ # response=response.http_response
+ # )
+
+
+class StorageHosts(SansIOHTTPPolicy):
+
+ def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument
+ self.hosts = hosts
+ super(StorageHosts, self).__init__()
+
+ def on_request(self, request):
+ # type: (PipelineRequest, Any) -> None
+ request.context.options['hosts'] = self.hosts
+ parsed_url = urlparse(request.http_request.url)
+
+ # Detect what location mode we're currently requesting with
+ location_mode = LocationMode.PRIMARY
+ for key, value in self.hosts.items():
+ if parsed_url.netloc == value:
+ location_mode = key
+
+ # See if a specific location mode has been specified, and if so, redirect
+ use_location = request.context.options.pop('use_location', None)
+ if use_location:
+ # Lock retries to the specific location
+ request.context.options['retry_to_secondary'] = False
+ if use_location not in self.hosts:
+ raise ValueError("Attempting to use undefined host location {}".format(use_location))
+ if use_location != location_mode:
+ # Update request URL to use the specified location
+ updated = parsed_url._replace(netloc=self.hosts[use_location])
+ request.http_request.url = updated.geturl()
+ location_mode = use_location
+
+ request.context.options['location_mode'] = location_mode
+
+
+class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
+ """A policy that logs HTTP request and response to the DEBUG logger.
+
+    This accepts both global configuration and the per-request "logging_enable" option.
+ """
+
+ def on_request(self, request):
+ # type: (PipelineRequest, Any) -> None
+ http_request = request.http_request
+ options = request.context.options
+ if options.pop("logging_enable", self.enable_http_logger):
+ request.context["logging_enable"] = True
+ if not _LOGGER.isEnabledFor(logging.DEBUG):
+ return
+
+ try:
+ log_url = http_request.url
+ query_params = http_request.query
+ if 'sig' in query_params:
+                    log_url = log_url.replace(query_params['sig'], "*****")
+ _LOGGER.debug("Request URL: %r", log_url)
+ _LOGGER.debug("Request method: %r", http_request.method)
+ _LOGGER.debug("Request headers:")
+ for header, value in http_request.headers.items():
+ if header.lower() == 'authorization':
+ value = '*****'
+ elif header.lower() == 'x-ms-copy-source' and 'sig' in value:
+ # take the url apart and scrub away the signed signature
+ scheme, netloc, path, params, query, fragment = urlparse(value)
+ parsed_qs = dict(parse_qsl(query))
+ parsed_qs['sig'] = '*****'
+
+ # the SAS needs to be put back together
+ value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))
+
+ _LOGGER.debug(" %r: %r", header, value)
+ _LOGGER.debug("Request body:")
+
+ # We don't want to log the binary data of a file upload.
+ if isinstance(http_request.body, types.GeneratorType):
+ _LOGGER.debug("File upload")
+ else:
+ _LOGGER.debug(str(http_request.body))
+ except Exception as err: # pylint: disable=broad-except
+ _LOGGER.debug("Failed to log request: %r", err)
+
+ def on_response(self, request, response):
+ # type: (PipelineRequest, PipelineResponse, Any) -> None
+ if response.context.pop("logging_enable", self.enable_http_logger):
+ if not _LOGGER.isEnabledFor(logging.DEBUG):
+ return
+
+ try:
+ _LOGGER.debug("Response status: %r", response.http_response.status_code)
+ _LOGGER.debug("Response headers:")
+ for res_header, value in response.http_response.headers.items():
+ _LOGGER.debug(" %r: %r", res_header, value)
+
+ # We don't want to log binary data if the response is a file.
+ _LOGGER.debug("Response content:")
+ pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE)
+ header = response.http_response.headers.get('content-disposition')
+
+ if header and pattern.match(header):
+ filename = header.partition('=')[2]
+ _LOGGER.debug("File attachments: %s", filename)
+ elif response.http_response.headers.get("content-type", "").endswith("octet-stream"):
+ _LOGGER.debug("Body contains binary data.")
+ elif response.http_response.headers.get("content-type", "").startswith("image"):
+ _LOGGER.debug("Body contains image data.")
+ else:
+ if response.context.options.get('stream', False):
+ _LOGGER.debug("Body is streamable")
+ else:
+ _LOGGER.debug(response.http_response.text())
+ except Exception as err: # pylint: disable=broad-except
+ _LOGGER.debug("Failed to log response: %s", repr(err))
+
+
+class StorageRequestHook(SansIOHTTPPolicy):
+
+ def __init__(self, **kwargs): # pylint: disable=unused-argument
+ self._request_callback = kwargs.get('raw_request_hook')
+ super(StorageRequestHook, self).__init__()
+
+ def on_request(self, request):
+ # type: (PipelineRequest, **Any) -> PipelineResponse
+ request_callback = request.context.options.pop('raw_request_hook', self._request_callback)
+ if request_callback:
+ request_callback(request)
+
+
+class StorageResponseHook(HTTPPolicy):
+
+ def __init__(self, **kwargs): # pylint: disable=unused-argument
+ self._response_callback = kwargs.get('raw_response_hook')
+ super(StorageResponseHook, self).__init__()
+
+ def send(self, request):
+ # type: (PipelineRequest) -> PipelineResponse
+ data_stream_total = request.context.get('data_stream_total') or \
+ request.context.options.pop('data_stream_total', None)
+ download_stream_current = request.context.get('download_stream_current') or \
+ request.context.options.pop('download_stream_current', None)
+ upload_stream_current = request.context.get('upload_stream_current') or \
+ request.context.options.pop('upload_stream_current', None)
+ response_callback = request.context.get('response_callback') or \
+ request.context.options.pop('raw_response_hook', self._response_callback)
+
+ response = self.next.send(request)
+ will_retry = is_retry(response, request.context.options.get('mode'))
+ if not will_retry and download_stream_current is not None:
+ download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+ if data_stream_total is None:
+ content_range = response.http_response.headers.get('Content-Range')
+ if content_range:
+ data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+ else:
+ data_stream_total = download_stream_current
+ elif not will_retry and upload_stream_current is not None:
+ upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+ for pipeline_obj in [request, response]:
+ pipeline_obj.context['data_stream_total'] = data_stream_total
+ pipeline_obj.context['download_stream_current'] = download_stream_current
+ pipeline_obj.context['upload_stream_current'] = upload_stream_current
+ if response_callback:
+ response_callback(response)
+ request.context['response_callback'] = response_callback
+ return response
+
+
+class StorageContentValidation(SansIOHTTPPolicy):
+ """A simple policy that sends the given headers
+ with the request.
+
+ This will overwrite any headers already defined in the request.
+ """
+ header_name = 'Content-MD5'
+
+ def __init__(self, **kwargs): # pylint: disable=unused-argument
+ super(StorageContentValidation, self).__init__()
+
+ @staticmethod
+ def get_content_md5(data):
+ md5 = hashlib.md5() # nosec
+ if isinstance(data, bytes):
+ md5.update(data)
+ elif hasattr(data, 'read'):
+ pos = 0
+ try:
+ pos = data.tell()
+ except: # pylint: disable=bare-except
+ pass
+ for chunk in iter(lambda: data.read(4096), b""):
+ md5.update(chunk)
+ try:
+ data.seek(pos, SEEK_SET)
+ except (AttributeError, IOError):
+ raise ValueError("Data should be bytes or a seekable file-like object.")
+ else:
+ raise ValueError("Data should be bytes or a seekable file-like object.")
+
+ return md5.digest()
+
+ def on_request(self, request):
+ # type: (PipelineRequest, Any) -> None
+ validate_content = request.context.options.pop('validate_content', False)
+ if validate_content and request.http_request.method != 'GET':
+ computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data))
+ request.http_request.headers[self.header_name] = computed_md5
+ request.context['validate_content_md5'] = computed_md5
+ request.context['validate_content'] = validate_content
+
+ def on_response(self, request, response):
+ if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
+ computed_md5 = request.context.get('validate_content_md5') or \
+ encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
+ if response.http_response.headers['content-md5'] != computed_md5:
+ raise AzureError(
+ 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format(
+ response.http_response.headers['content-md5'], computed_md5),
+ response=response.http_response
+ )
+
+
+class StorageRetryPolicy(HTTPPolicy):
+ """
+ The base class for Exponential and Linear retries containing shared code.
+ """
+
+ def __init__(self, **kwargs):
+ self.total_retries = kwargs.pop('retry_total', 10)
+ self.connect_retries = kwargs.pop('retry_connect', 3)
+ self.read_retries = kwargs.pop('retry_read', 3)
+ self.status_retries = kwargs.pop('retry_status', 3)
+ self.retry_to_secondary = kwargs.pop('retry_to_secondary', False)
+ super(StorageRetryPolicy, self).__init__()
+
+ def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use
+ """
+ A function which sets the next host location on the request, if applicable.
+
+        :param dict settings:
+            The retry settings containing the current location mode and the available hosts.
+        :param request:
+            The request to evaluate and possibly modify.
+ """
+ if settings['hosts'] and all(settings['hosts'].values()):
+ url = urlparse(request.url)
+ # If there's more than one possible location, retry to the alternative
+ if settings['mode'] == LocationMode.PRIMARY:
+ settings['mode'] = LocationMode.SECONDARY
+ else:
+ settings['mode'] = LocationMode.PRIMARY
+ updated = url._replace(netloc=settings['hosts'].get(settings['mode']))
+ request.url = updated.geturl()
+
+ def configure_retries(self, request): # pylint: disable=no-self-use
+ body_position = None
+ if hasattr(request.http_request.body, 'read'):
+ try:
+ body_position = request.http_request.body.tell()
+ except (AttributeError, UnsupportedOperation):
+ # if body position cannot be obtained, then retries will not work
+ pass
+ options = request.context.options
+ return {
+ 'total': options.pop("retry_total", self.total_retries),
+ 'connect': options.pop("retry_connect", self.connect_retries),
+ 'read': options.pop("retry_read", self.read_retries),
+ 'status': options.pop("retry_status", self.status_retries),
+ 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary),
+ 'mode': options.pop("location_mode", LocationMode.PRIMARY),
+ 'hosts': options.pop("hosts", None),
+ 'hook': options.pop("retry_hook", None),
+ 'body_position': body_position,
+ 'count': 0,
+ 'history': []
+ }
+
+ def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use
+ """ Formula for computing the current backoff.
+        Should be overridden by the child class.
+
+ :rtype: float
+ """
+ return 0
+
+ def sleep(self, settings, transport):
+ backoff = self.get_backoff_time(settings)
+ if not backoff or backoff < 0:
+ return
+ transport.sleep(backoff)
+
+ def increment(self, settings, request, response=None, error=None):
+ """Increment the retry counters.
+
+ :param response: A pipeline response object.
+ :param error: An error encountered during the request, or
+ None if the response was received successfully.
+
+ :return: Whether the retry attempts are exhausted.
+ """
+ settings['total'] -= 1
+
+ if error and isinstance(error, ServiceRequestError):
+ # Errors when we're fairly sure that the server did not receive the
+ # request, so it should be safe to retry.
+ settings['connect'] -= 1
+ settings['history'].append(RequestHistory(request, error=error))
+
+ elif error and isinstance(error, ServiceResponseError):
+ # Errors that occur after the request has been started, so we should
+ # assume that the server began processing it.
+ settings['read'] -= 1
+ settings['history'].append(RequestHistory(request, error=error))
+
+ else:
+ # Incrementing because of a server error like a 500 in
+            # status_forcelist and the given method is in the whitelist
+ if response:
+ settings['status'] -= 1
+ settings['history'].append(RequestHistory(request, http_response=response))
+
+ if not is_exhausted(settings):
+ if request.method not in ['PUT'] and settings['retry_secondary']:
+ self._set_next_host_location(settings, request)
+
+ # rewind the request body if it is a stream
+ if request.body and hasattr(request.body, 'read'):
+                # if no position was saved, then retries will not work
+ if settings['body_position'] is None:
+ return False
+ try:
+ # attempt to rewind the body to the initial position
+ request.body.seek(settings['body_position'], SEEK_SET)
+ except (UnsupportedOperation, ValueError):
+ # if body is not seekable, then retry would not work
+ return False
+ settings['count'] += 1
+ return True
+ return False
+
+ def send(self, request):
+ retries_remaining = True
+ response = None
+ retry_settings = self.configure_retries(request)
+ while retries_remaining:
+ try:
+ response = self.next.send(request)
+ if is_retry(response, retry_settings['mode']):
+ retries_remaining = self.increment(
+ retry_settings,
+ request=request.http_request,
+ response=response.http_response)
+ if retries_remaining:
+ retry_hook(
+ retry_settings,
+ request=request.http_request,
+ response=response.http_response,
+ error=None)
+ self.sleep(retry_settings, request.context.transport)
+ continue
+ break
+ except AzureError as err:
+ retries_remaining = self.increment(
+ retry_settings, request=request.http_request, error=err)
+ if retries_remaining:
+ retry_hook(
+ retry_settings,
+ request=request.http_request,
+ response=None,
+ error=err)
+ self.sleep(retry_settings, request.context.transport)
+ continue
+ raise err
+ if retry_settings['history']:
+ response.context['history'] = retry_settings['history']
+ response.http_response.location_mode = retry_settings['mode']
+ return response
+
+
+class ExponentialRetry(StorageRetryPolicy):
+ """Exponential retry."""
+
+ def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+ retry_to_secondary=False, random_jitter_range=3, **kwargs):
+ '''
+ Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries occur after initial_backoff +
+        increment_base^retry_count seconds. For example, by default the first retry
+ occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+ third after (15+3^2) = 24 seconds.
+
+ :param int initial_backoff:
+ The initial backoff interval, in seconds, for the first retry.
+ :param int increment_base:
+ The base, in seconds, to increment the initial_backoff by after the
+ first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x varying between x-3 and x+3.
+ '''
+ self.initial_backoff = initial_backoff
+ self.increment_base = increment_base
+ self.random_jitter_range = random_jitter_range
+ super(ExponentialRetry, self).__init__(
+ retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+ def get_backoff_time(self, settings):
+ """
+ Calculates how long to sleep before retrying.
+
+ :return:
+            A float indicating how long to wait, in seconds, before retrying the request.
+        :rtype: float
+ """
+ random_generator = random.Random()
+ backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+ random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+ random_range_end = backoff + self.random_jitter_range
+ return random_generator.uniform(random_range_start, random_range_end)
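+
+
+# Illustrative sketch (not part of the upstream SDK): the values produced by the
+# backoff formula above for the default settings (initial_backoff=15,
+# increment_base=3, random_jitter_range=3). `_example_exponential_backoff_values`
+# is a hypothetical helper that only makes the constructor docstring arithmetic concrete.
+def _example_exponential_backoff_values():
+    policy = ExponentialRetry()
+    delays = [policy.get_backoff_time({'count': count}) for count in range(3)]
+    # Before jitter the delays are 15, 15 + 3**1 = 18 and 15 + 3**2 = 24 seconds;
+    # each returned value is then jittered within +/- 3 seconds of that base.
+    assert 12 <= delays[0] <= 18
+    assert 15 <= delays[1] <= 21
+    assert 21 <= delays[2] <= 27
+    return delays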
+
+
+class LinearRetry(StorageRetryPolicy):
+ """Linear retry."""
+
+ def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+ """
+ Constructs a Linear retry object.
+
+ :param int backoff:
+ The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x varying between x-3 and x+3.
+ """
+ self.backoff = backoff
+ self.random_jitter_range = random_jitter_range
+ super(LinearRetry, self).__init__(
+ retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+ def get_backoff_time(self, settings):
+ """
+ Calculates how long to sleep before retrying.
+
+ :return:
+            A float indicating how long to wait, in seconds, before retrying the request.
+        :rtype: float
+ """
+ random_generator = random.Random()
+ # the backoff interval normally does not change, however there is the possibility
+ # that it was modified by accessing the property directly after initializing the object
+ random_range_start = self.backoff - self.random_jitter_range \
+ if self.backoff > self.random_jitter_range else 0
+ random_range_end = self.backoff + self.random_jitter_range
+ return random_generator.uniform(random_range_start, random_range_end)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/policies_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/policies_async.py
new file mode 100644
index 00000000000..e0926b81dbc
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/policies_async.py
@@ -0,0 +1,220 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+import asyncio
+import random
+import logging
+from typing import Any, TYPE_CHECKING
+
+from azure.core.pipeline.policies import AsyncHTTPPolicy
+from azure.core.exceptions import AzureError
+
+from .policies import is_retry, StorageRetryPolicy
+
+if TYPE_CHECKING:
+ from azure.core.pipeline import PipelineRequest, PipelineResponse
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+async def retry_hook(settings, **kwargs):
+ if settings['hook']:
+        if asyncio.iscoroutinefunction(settings['hook']):
+ await settings['hook'](
+ retry_count=settings['count'] - 1,
+ location_mode=settings['mode'],
+ **kwargs)
+ else:
+ settings['hook'](
+ retry_count=settings['count'] - 1,
+ location_mode=settings['mode'],
+ **kwargs)
+
+
+class AsyncStorageResponseHook(AsyncHTTPPolicy):
+
+ def __init__(self, **kwargs): # pylint: disable=unused-argument
+ self._response_callback = kwargs.get('raw_response_hook')
+ super(AsyncStorageResponseHook, self).__init__()
+
+ async def send(self, request):
+ # type: (PipelineRequest) -> PipelineResponse
+ data_stream_total = request.context.get('data_stream_total') or \
+ request.context.options.pop('data_stream_total', None)
+ download_stream_current = request.context.get('download_stream_current') or \
+ request.context.options.pop('download_stream_current', None)
+ upload_stream_current = request.context.get('upload_stream_current') or \
+ request.context.options.pop('upload_stream_current', None)
+ response_callback = request.context.get('response_callback') or \
+ request.context.options.pop('raw_response_hook', self._response_callback)
+
+ response = await self.next.send(request)
+ await response.http_response.load_body()
+
+ will_retry = is_retry(response, request.context.options.get('mode'))
+ if not will_retry and download_stream_current is not None:
+ download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+ if data_stream_total is None:
+ content_range = response.http_response.headers.get('Content-Range')
+ if content_range:
+ data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+ else:
+ data_stream_total = download_stream_current
+ elif not will_retry and upload_stream_current is not None:
+ upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+ for pipeline_obj in [request, response]:
+ pipeline_obj.context['data_stream_total'] = data_stream_total
+ pipeline_obj.context['download_stream_current'] = download_stream_current
+ pipeline_obj.context['upload_stream_current'] = upload_stream_current
+ if response_callback:
+            if asyncio.iscoroutinefunction(response_callback):
+ await response_callback(response)
+ else:
+ response_callback(response)
+ request.context['response_callback'] = response_callback
+ return response
+
+
+class AsyncStorageRetryPolicy(StorageRetryPolicy):
+ """
+ The base class for Exponential and Linear retries containing shared code.
+ """
+
+ async def sleep(self, settings, transport):
+ backoff = self.get_backoff_time(settings)
+ if not backoff or backoff < 0:
+ return
+ await transport.sleep(backoff)
+
+ async def send(self, request):
+ retries_remaining = True
+ response = None
+ retry_settings = self.configure_retries(request)
+ while retries_remaining:
+ try:
+ response = await self.next.send(request)
+ if is_retry(response, retry_settings['mode']):
+ retries_remaining = self.increment(
+ retry_settings,
+ request=request.http_request,
+ response=response.http_response)
+ if retries_remaining:
+ await retry_hook(
+ retry_settings,
+ request=request.http_request,
+ response=response.http_response,
+ error=None)
+ await self.sleep(retry_settings, request.context.transport)
+ continue
+ break
+ except AzureError as err:
+ retries_remaining = self.increment(
+ retry_settings, request=request.http_request, error=err)
+ if retries_remaining:
+ await retry_hook(
+ retry_settings,
+ request=request.http_request,
+ response=None,
+ error=err)
+ await self.sleep(retry_settings, request.context.transport)
+ continue
+ raise err
+ if retry_settings['history']:
+ response.context['history'] = retry_settings['history']
+ response.http_response.location_mode = retry_settings['mode']
+ return response
+
+
+class ExponentialRetry(AsyncStorageRetryPolicy):
+ """Exponential retry."""
+
+ def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+ retry_to_secondary=False, random_jitter_range=3, **kwargs):
+ '''
+ Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries occur after initial_backoff +
+        increment_base^retry_count seconds. For example, by default the first retry
+ occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+ third after (15+3^2) = 24 seconds.
+
+ :param int initial_backoff:
+ The initial backoff interval, in seconds, for the first retry.
+ :param int increment_base:
+ The base, in seconds, to increment the initial_backoff by after the
+ first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x varying between x-3 and x+3.
+ '''
+ self.initial_backoff = initial_backoff
+ self.increment_base = increment_base
+ self.random_jitter_range = random_jitter_range
+ super(ExponentialRetry, self).__init__(
+ retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+ def get_backoff_time(self, settings):
+ """
+ Calculates how long to sleep before retrying.
+
+ :return:
+            A float indicating how long to wait, in seconds, before retrying the request.
+        :rtype: float
+ """
+ random_generator = random.Random()
+ backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+ random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+ random_range_end = backoff + self.random_jitter_range
+ return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(AsyncStorageRetryPolicy):
+ """Linear retry."""
+
+ def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+ """
+ Constructs a Linear retry object.
+
+ :param int backoff:
+ The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x varying between x-3 and x+3.
+ """
+ self.backoff = backoff
+ self.random_jitter_range = random_jitter_range
+ super(LinearRetry, self).__init__(
+ retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+ def get_backoff_time(self, settings):
+ """
+ Calculates how long to sleep before retrying.
+
+ :return:
+            A float indicating how long to wait, in seconds, before retrying the request.
+        :rtype: float
+ """
+ random_generator = random.Random()
+ # the backoff interval normally does not change, however there is the possibility
+ # that it was modified by accessing the property directly after initializing the object
+ random_range_start = self.backoff - self.random_jitter_range \
+ if self.backoff > self.random_jitter_range else 0
+ random_range_end = self.backoff + self.random_jitter_range
+ return random_generator.uniform(random_range_start, random_range_end)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/request_handlers.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/request_handlers.py
new file mode 100644
index 00000000000..37354d7907a
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/request_handlers.py
@@ -0,0 +1,273 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+ TYPE_CHECKING
+)
+
+import logging
+from os import fstat
+from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
+
+import isodate
+
+from azure.core.exceptions import raise_with_traceback
+
+
+_LOGGER = logging.getLogger(__name__)
+
+_REQUEST_DELIMITER_PREFIX = "batch_"
+_HTTP1_1_IDENTIFIER = "HTTP/1.1"
+_HTTP_LINE_ENDING = "\r\n"
+
+
+def serialize_iso(attr):
+ """Serialize Datetime object into ISO-8601 formatted string.
+
+ :param Datetime attr: Object to be serialized.
+ :rtype: str
+ :raises: ValueError if format invalid.
+ """
+ if not attr:
+ return None
+ if isinstance(attr, str):
+ attr = isodate.parse_datetime(attr)
+ try:
+ utc = attr.utctimetuple()
+ if utc.tm_year > 9999 or utc.tm_year < 1:
+ raise OverflowError("Hit max or min date")
+
+ date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
+ utc.tm_year, utc.tm_mon, utc.tm_mday,
+ utc.tm_hour, utc.tm_min, utc.tm_sec)
+ return date + 'Z'
+ except (ValueError, OverflowError) as err:
+ msg = "Unable to serialize datetime object."
+ raise_with_traceback(ValueError, msg, err)
+ except AttributeError as err:
+ msg = "ISO-8601 object must be valid Datetime object."
+ raise_with_traceback(TypeError, msg, err)
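+
+
+# Illustrative sketch (not part of the upstream SDK): serialize_iso truncates to whole
+# seconds and always appends the 'Z' (UTC) suffix; naive datetimes are treated as UTC.
+def _example_serialize_iso():
+    from datetime import datetime
+    assert serialize_iso(datetime(2021, 5, 20, 1, 2, 3)) == '2021-05-20T01:02:03Z'
+    assert serialize_iso('2021-05-20T01:02:03Z') == '2021-05-20T01:02:03Z'  # strings are parsed first
+    assert serialize_iso(None) is None
+    return True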
+
+
+def get_length(data):
+ length = None
+ # Check if object implements the __len__ method, covers most input cases such as bytearray.
+ try:
+ length = len(data)
+ except: # pylint: disable=bare-except
+ pass
+
+ if not length:
+ # Check if the stream is a file-like stream object.
+ # If so, calculate the size using the file descriptor.
+ try:
+ fileno = data.fileno()
+ except (AttributeError, UnsupportedOperation):
+ pass
+ else:
+ try:
+ return fstat(fileno).st_size
+ except OSError:
+                # Not a valid fileno; possibly requests returned
+                # a socket number instead.
+ pass
+
+ # If the stream is seekable and tell() is implemented, calculate the stream size.
+ try:
+ current_position = data.tell()
+ data.seek(0, SEEK_END)
+ length = data.tell() - current_position
+ data.seek(current_position, SEEK_SET)
+ except (AttributeError, UnsupportedOperation):
+ pass
+
+ return length
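+
+
+# Illustrative sketch (not part of the upstream SDK): get_length falls back from len()
+# to fstat() to seek/tell, so it works for bytes as well as seekable streams without
+# consuming them.
+def _example_get_length():
+    from io import BytesIO
+    assert get_length(b'hello') == 5       # plain bytes implement __len__
+    stream = BytesIO(b'hello world')
+    stream.read(6)
+    assert get_length(stream) == 5         # bytes remaining from the current position
+    assert stream.tell() == 6              # the stream position is restored afterwards
+    return True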
+
+
+def read_length(data):
+ try:
+ if hasattr(data, 'read'):
+ read_data = b''
+ for chunk in iter(lambda: data.read(4096), b""):
+ read_data += chunk
+ return len(read_data), read_data
+ if hasattr(data, '__iter__'):
+ read_data = b''
+ for chunk in data:
+ read_data += chunk
+ return len(read_data), read_data
+ except: # pylint: disable=bare-except
+ pass
+ raise ValueError("Unable to calculate content length, please specify.")
+
+
+def validate_and_format_range_headers(
+ start_range, end_range, start_range_required=True,
+ end_range_required=True, check_content_md5=False, align_to_page=False):
+ # If end range is provided, start range must be provided
+ if (start_range_required or end_range is not None) and start_range is None:
+ raise ValueError("start_range value cannot be None.")
+ if end_range_required and end_range is None:
+ raise ValueError("end_range value cannot be None.")
+
+ # Page ranges must be 512 aligned
+ if align_to_page:
+ if start_range is not None and start_range % 512 != 0:
+ raise ValueError("Invalid page blob start_range: {0}. "
+ "The size must be aligned to a 512-byte boundary.".format(start_range))
+ if end_range is not None and end_range % 512 != 511:
+ raise ValueError("Invalid page blob end_range: {0}. "
+ "The size must be aligned to a 512-byte boundary.".format(end_range))
+
+ # Format based on whether end_range is present
+ range_header = None
+ if end_range is not None:
+ range_header = 'bytes={0}-{1}'.format(start_range, end_range)
+ elif start_range is not None:
+ range_header = "bytes={0}-".format(start_range)
+
+ # Content MD5 can only be provided for a complete range less than 4MB in size
+ range_validation = None
+ if check_content_md5:
+ if start_range is None or end_range is None:
+ raise ValueError("Both start and end range requied for MD5 content validation.")
+ if end_range - start_range > 4 * 1024 * 1024:
+ raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.")
+ range_validation = 'true'
+
+ return range_header, range_validation
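+
+
+# Illustrative sketch (not part of the upstream SDK): the header values produced for a
+# simple byte range, and the validation flag returned when an MD5 check is requested.
+def _example_range_headers():
+    header, validation = validate_and_format_range_headers(0, 511)
+    assert header == 'bytes=0-511' and validation is None
+    _, validation = validate_and_format_range_headers(0, 511, check_content_md5=True)
+    assert validation == 'true'            # only allowed for complete ranges up to 4MB
+    header, _ = validate_and_format_range_headers(512, None, end_range_required=False)
+    assert header == 'bytes=512-'          # open-ended range
+    return header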
+
+
+def add_metadata_headers(metadata=None):
+ # type: (Optional[Dict[str, str]]) -> Dict[str, str]
+ headers = {}
+ if metadata:
+ for key, value in metadata.items():
+ headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value
+ return headers
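+
+
+# Illustrative sketch (not part of the upstream SDK): metadata keys are prefixed with
+# 'x-ms-meta-' and keys/values are stripped of surrounding whitespace before sending.
+def _example_metadata_headers():
+    headers = add_metadata_headers({'project ': ' adls ', 'owner': 'me'})
+    assert headers == {'x-ms-meta-project': 'adls', 'x-ms-meta-owner': 'me'}
+    assert add_metadata_headers(None) == {}
+    return headers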
+
+
+def serialize_batch_body(requests, batch_id):
+ """
+    --<delimiter>
+    <subrequest>
+    --<delimiter>
+    <subrequest>    (repeated as needed)
+    --<delimiter>--
+
+    Serializes the requests in this batch to a single HTTP mixed/multipart body.
+
+    :param list[~azure.core.pipeline.transport.HttpRequest] requests:
+        a list of sub-requests for the batch request
+    :param str batch_id:
+        to be embedded in the batch sub-request delimiter
+ :return: The body bytes for this batch.
+ """
+
+ if requests is None or len(requests) == 0:
+ raise ValueError('Please provide sub-request(s) for this batch request')
+
+ delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8')
+ newline_bytes = _HTTP_LINE_ENDING.encode('utf-8')
+ batch_body = list()
+
+ content_index = 0
+ for request in requests:
+ request.headers.update({
+ "Content-ID": str(content_index),
+ "Content-Length": str(0)
+ })
+ batch_body.append(delimiter_bytes)
+ batch_body.append(_make_body_from_sub_request(request))
+ batch_body.append(newline_bytes)
+ content_index += 1
+
+ batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8'))
+ # final line of body MUST have \r\n at the end, or it will not be properly read by the service
+ batch_body.append(newline_bytes)
+
+ return bytes().join(batch_body)
+
+
+def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False):
+ """
+ Gets the delimiter used for this batch request's mixed/multipart HTTP format.
+
+ :param str batch_id:
+ Randomly generated id
+ :param bool is_prepend_dashes:
+        Whether to include the starting dashes. Used in the body, but not when defining the delimiter.
+ :param bool is_append_dashes:
+ Whether to include the ending dashes. Used in the body on the closing delimiter only.
+ :return: The delimiter, WITHOUT a trailing newline.
+ """
+
+ prepend_dashes = '--' if is_prepend_dashes else ''
+ append_dashes = '--' if is_append_dashes else ''
+
+ return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes
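+
+
+# Illustrative sketch (not part of the upstream SDK): the delimiters produced for a
+# hypothetical batch id. Body delimiters carry leading dashes only; the closing
+# delimiter carries both leading and trailing dashes.
+def _example_batch_delimiters():
+    batch_id = 'example-guid'   # hypothetical; normally a generated uuid string
+    assert _get_batch_request_delimiter(batch_id) == 'batch_example-guid'
+    assert _get_batch_request_delimiter(batch_id, True, False) == '--batch_example-guid'
+    assert _get_batch_request_delimiter(batch_id, True, True) == '--batch_example-guid--'
+    return _get_batch_request_delimiter(batch_id, True, True)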
+
+
+def _make_body_from_sub_request(sub_request):
+ """
+     Content-Type: application/http
+     Content-ID: <sequential int ID>
+     Content-Transfer-Encoding: <value> (if present)
+
+     <verb> <path><query> HTTP/<version>
+     <header key>: <header value> (repeated as necessary)
+     Content-Length: <body length>
+     <newline if content length > 0>
+     <body> (if content length > 0)
+
+ Serializes an http request.
+
+ :param ~azure.core.pipeline.transport.HttpRequest sub_request:
+ Request to serialize.
+ :return: The serialized sub-request in bytes
+ """
+
+ # put the sub-request's headers into a list for efficient str concatenation
+ sub_request_body = list()
+
+ # get headers for ease of manipulation; remove headers as they are used
+ headers = sub_request.headers
+
+ # append opening headers
+ sub_request_body.append("Content-Type: application/http")
+ sub_request_body.append(_HTTP_LINE_ENDING)
+
+ sub_request_body.append("Content-ID: ")
+ sub_request_body.append(headers.pop("Content-ID", ""))
+ sub_request_body.append(_HTTP_LINE_ENDING)
+
+ sub_request_body.append("Content-Transfer-Encoding: binary")
+ sub_request_body.append(_HTTP_LINE_ENDING)
+
+ # append blank line
+ sub_request_body.append(_HTTP_LINE_ENDING)
+
+ # append HTTP verb and path and query and HTTP version
+ sub_request_body.append(sub_request.method)
+ sub_request_body.append(' ')
+ sub_request_body.append(sub_request.url)
+ sub_request_body.append(' ')
+ sub_request_body.append(_HTTP1_1_IDENTIFIER)
+ sub_request_body.append(_HTTP_LINE_ENDING)
+
+ # append remaining headers (this will set the Content-Length, as it was set on `sub-request`)
+ for header_name, header_value in headers.items():
+ if header_value is not None:
+ sub_request_body.append(header_name)
+ sub_request_body.append(": ")
+ sub_request_body.append(header_value)
+ sub_request_body.append(_HTTP_LINE_ENDING)
+
+ # append blank line
+ sub_request_body.append(_HTTP_LINE_ENDING)
+
+ return ''.join(sub_request_body).encode()
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/response_handlers.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/response_handlers.py
new file mode 100644
index 00000000000..be60b3878f6
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/response_handlers.py
@@ -0,0 +1,159 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+ TYPE_CHECKING
+)
+import logging
+
+from azure.core.pipeline.policies import ContentDecodePolicy
+from azure.core.exceptions import (
+ HttpResponseError,
+ ResourceNotFoundError,
+ ResourceModifiedError,
+ ResourceExistsError,
+ ClientAuthenticationError,
+ DecodeError)
+
+from .parser import _to_utc_datetime
+from .models import StorageErrorCode, UserDelegationKey, get_enum_value
+
+
+if TYPE_CHECKING:
+ from datetime import datetime
+ from azure.core.exceptions import AzureError
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class PartialBatchErrorException(HttpResponseError):
+ """There is a partial failure in batch operations.
+
+ :param str message: The message of the exception.
+ :param response: Server response to be deserialized.
+ :param list parts: A list of the parts in multipart response.
+ """
+
+ def __init__(self, message, response, parts):
+ self.parts = parts
+ super(PartialBatchErrorException, self).__init__(message=message, response=response)
+
+
+def parse_length_from_content_range(content_range):
+ '''
+ Parses the blob length from the content range header: bytes 1-3/65537
+ '''
+ if content_range is None:
+ return None
+
+ # First, split in space and take the second half: '1-3/65537'
+ # Next, split on slash and take the second half: '65537'
+ # Finally, convert to an int: 65537
+ return int(content_range.split(' ', 1)[1].split('/', 1)[1])
+
+
+def normalize_headers(headers):
+ normalized = {}
+ for key, value in headers.items():
+ if key.startswith('x-ms-'):
+ key = key[5:]
+ normalized[key.lower().replace('-', '_')] = get_enum_value(value)
+ return normalized
+
+
+def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument
+ raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")}
+ return {k[10:]: v for k, v in raw_metadata.items()}
+
+
+def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument
+ return normalize_headers(response_headers)
+
+
+def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument
+ return normalize_headers(response_headers), deserialized
+
+
+def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument
+ return response.http_response.location_mode, deserialized
+
+
+def process_storage_error(storage_error):
+ raise_error = HttpResponseError
+ error_code = storage_error.response.headers.get('x-ms-error-code')
+ error_message = storage_error.message
+ additional_data = {}
+ try:
+ error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
+ if error_body:
+ for info in error_body.iter():
+ if info.tag.lower() == 'code':
+ error_code = info.text
+ elif info.tag.lower() == 'message':
+ error_message = info.text
+ else:
+ additional_data[info.tag] = info.text
+ except DecodeError:
+ pass
+
+ try:
+ if error_code:
+ error_code = StorageErrorCode(error_code)
+ if error_code in [StorageErrorCode.condition_not_met,
+ StorageErrorCode.blob_overwritten]:
+ raise_error = ResourceModifiedError
+ if error_code in [StorageErrorCode.invalid_authentication_info,
+ StorageErrorCode.authentication_failed]:
+ raise_error = ClientAuthenticationError
+ if error_code in [StorageErrorCode.resource_not_found,
+ StorageErrorCode.cannot_verify_copy_source,
+ StorageErrorCode.blob_not_found,
+ StorageErrorCode.queue_not_found,
+ StorageErrorCode.container_not_found,
+ StorageErrorCode.parent_not_found,
+ StorageErrorCode.share_not_found]:
+ raise_error = ResourceNotFoundError
+ if error_code in [StorageErrorCode.account_already_exists,
+ StorageErrorCode.account_being_created,
+ StorageErrorCode.resource_already_exists,
+ StorageErrorCode.resource_type_mismatch,
+ StorageErrorCode.blob_already_exists,
+ StorageErrorCode.queue_already_exists,
+ StorageErrorCode.container_already_exists,
+ StorageErrorCode.container_being_deleted,
+ StorageErrorCode.queue_being_deleted,
+ StorageErrorCode.share_already_exists,
+ StorageErrorCode.share_being_deleted]:
+ raise_error = ResourceExistsError
+ except ValueError:
+ # Got an unknown error code
+ pass
+
+ try:
+ error_message += "\nErrorCode:{}".format(error_code.value)
+ except AttributeError:
+ error_message += "\nErrorCode:{}".format(error_code)
+ for name, info in additional_data.items():
+ error_message += "\n{}:{}".format(name, info)
+
+ error = raise_error(message=error_message, response=storage_error.response)
+ error.error_code = error_code
+ error.additional_info = additional_data
+ error.raise_with_traceback()
+
+
+def parse_to_internal_user_delegation_key(service_user_delegation_key):
+ internal_user_delegation_key = UserDelegationKey()
+ internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid
+ internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid
+ internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start)
+ internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry)
+ internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service
+ internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version
+ internal_user_delegation_key.value = service_user_delegation_key.value
+ return internal_user_delegation_key
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/shared_access_signature.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/shared_access_signature.py
new file mode 100644
index 00000000000..07aad5ffa1c
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/shared_access_signature.py
@@ -0,0 +1,220 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from datetime import date
+
+from .parser import _str, _to_utc_datetime
+from .constants import X_MS_VERSION
+from . import sign_string, url_quote
+
+
+class QueryStringConstants(object):
+ SIGNED_SIGNATURE = 'sig'
+ SIGNED_PERMISSION = 'sp'
+ SIGNED_START = 'st'
+ SIGNED_EXPIRY = 'se'
+ SIGNED_RESOURCE = 'sr'
+ SIGNED_IDENTIFIER = 'si'
+ SIGNED_IP = 'sip'
+ SIGNED_PROTOCOL = 'spr'
+ SIGNED_VERSION = 'sv'
+ SIGNED_CACHE_CONTROL = 'rscc'
+ SIGNED_CONTENT_DISPOSITION = 'rscd'
+ SIGNED_CONTENT_ENCODING = 'rsce'
+ SIGNED_CONTENT_LANGUAGE = 'rscl'
+ SIGNED_CONTENT_TYPE = 'rsct'
+ START_PK = 'spk'
+ START_RK = 'srk'
+ END_PK = 'epk'
+ END_RK = 'erk'
+ SIGNED_RESOURCE_TYPES = 'srt'
+ SIGNED_SERVICES = 'ss'
+ SIGNED_OID = 'skoid'
+ SIGNED_TID = 'sktid'
+ SIGNED_KEY_START = 'skt'
+ SIGNED_KEY_EXPIRY = 'ske'
+ SIGNED_KEY_SERVICE = 'sks'
+ SIGNED_KEY_VERSION = 'skv'
+
+ # for ADLS
+ SIGNED_AUTHORIZED_OID = 'saoid'
+ SIGNED_UNAUTHORIZED_OID = 'suoid'
+ SIGNED_CORRELATION_ID = 'scid'
+ SIGNED_DIRECTORY_DEPTH = 'sdd'
+
+ @staticmethod
+ def to_list():
+ return [
+ QueryStringConstants.SIGNED_SIGNATURE,
+ QueryStringConstants.SIGNED_PERMISSION,
+ QueryStringConstants.SIGNED_START,
+ QueryStringConstants.SIGNED_EXPIRY,
+ QueryStringConstants.SIGNED_RESOURCE,
+ QueryStringConstants.SIGNED_IDENTIFIER,
+ QueryStringConstants.SIGNED_IP,
+ QueryStringConstants.SIGNED_PROTOCOL,
+ QueryStringConstants.SIGNED_VERSION,
+ QueryStringConstants.SIGNED_CACHE_CONTROL,
+ QueryStringConstants.SIGNED_CONTENT_DISPOSITION,
+ QueryStringConstants.SIGNED_CONTENT_ENCODING,
+ QueryStringConstants.SIGNED_CONTENT_LANGUAGE,
+ QueryStringConstants.SIGNED_CONTENT_TYPE,
+ QueryStringConstants.START_PK,
+ QueryStringConstants.START_RK,
+ QueryStringConstants.END_PK,
+ QueryStringConstants.END_RK,
+ QueryStringConstants.SIGNED_RESOURCE_TYPES,
+ QueryStringConstants.SIGNED_SERVICES,
+ QueryStringConstants.SIGNED_OID,
+ QueryStringConstants.SIGNED_TID,
+ QueryStringConstants.SIGNED_KEY_START,
+ QueryStringConstants.SIGNED_KEY_EXPIRY,
+ QueryStringConstants.SIGNED_KEY_SERVICE,
+ QueryStringConstants.SIGNED_KEY_VERSION,
+ # for ADLS
+ QueryStringConstants.SIGNED_AUTHORIZED_OID,
+ QueryStringConstants.SIGNED_UNAUTHORIZED_OID,
+ QueryStringConstants.SIGNED_CORRELATION_ID,
+ QueryStringConstants.SIGNED_DIRECTORY_DEPTH,
+ ]
+
+
+class SharedAccessSignature(object):
+    '''
+    Provides a factory for creating account shared access
+    signature tokens with an account name and account key. Users can either
+    use the factory or construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+ def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
+ '''
+ :param str account_name:
+ The storage account name used to generate the shared access signatures.
+ :param str account_key:
+            The access key to generate the shared access signatures.
+ :param str x_ms_version:
+ The service version used to generate the shared access signatures.
+ '''
+ self.account_name = account_name
+ self.account_key = account_key
+ self.x_ms_version = x_ms_version
+
+ def generate_account(self, services, resource_types, permission, expiry, start=None,
+ ip=None, protocol=None):
+ '''
+ Generates a shared access signature for the account.
+ Use the returned signature with the sas_token parameter of the service
+ or to create a new account object.
+
+ :param ResourceTypes resource_types:
+ Specifies the resource types that are accessible with the account
+ SAS. You can combine values to provide access to more than one
+ resource type.
+ :param AccountSasPermissions permission:
+ The permissions associated with the shared access signature. The
+ user is restricted to operations allowed by the permissions.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has been
+ specified in an associated stored access policy. You can combine
+ values to provide more than one permission.
+ :param expiry:
+ The time at which the shared access signature becomes invalid.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has
+ been specified in an associated stored access policy. Azure will always
+ convert values to UTC. If a date is passed in without timezone info, it
+ is assumed to be UTC.
+ :type expiry: datetime or str
+ :param start:
+ The time at which the shared access signature becomes valid. If
+ omitted, start time for this call is assumed to be the time when the
+ storage service receives the request. Azure will always convert values
+ to UTC. If a date is passed in without timezone info, it is assumed to
+ be UTC.
+ :type start: datetime or str
+ :param str ip:
+ Specifies an IP address or a range of IP addresses from which to accept requests.
+ If the IP address from which the request originates does not match the IP address
+ or address range specified on the SAS token, the request is not authenticated.
+ For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+ restricts the request to those IP addresses.
+ :param str protocol:
+ Specifies the protocol permitted for a request made. The default value
+ is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+ '''
+ sas = _SharedAccessHelper()
+ sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
+ sas.add_account(services, resource_types)
+ sas.add_account_signature(self.account_name, self.account_key)
+
+ return sas.get_token()
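+
+    # Illustrative usage sketch (not part of the vendored SDK); the account
+    # name, key and expiry below are placeholders.
+    #
+    #   sas = SharedAccessSignature("myaccount", "bXlhY2NvdW50LWtleQ==")
+    #   token = sas.generate_account(
+    #       services="b",            # blob service
+    #       resource_types="sco",    # service, container, object
+    #       permission="rl",         # read + list
+    #       expiry="2021-12-31T00:00:00Z")
+    #   # token is the SAS query string, e.g. "sp=rl&se=...&sv=...&sig=..."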
+
+
+class _SharedAccessHelper(object):
+ def __init__(self):
+ self.query_dict = {}
+
+ def _add_query(self, name, val):
+ if val:
+ self.query_dict[name] = _str(val) if val is not None else None
+
+ def add_base(self, permission, expiry, start, ip, protocol, x_ms_version):
+ if isinstance(start, date):
+ start = _to_utc_datetime(start)
+
+ if isinstance(expiry, date):
+ expiry = _to_utc_datetime(expiry)
+
+ self._add_query(QueryStringConstants.SIGNED_START, start)
+ self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry)
+ self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission)
+ self._add_query(QueryStringConstants.SIGNED_IP, ip)
+ self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol)
+ self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version)
+
+ def add_resource(self, resource):
+ self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource)
+
+ def add_id(self, policy_id):
+ self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id)
+
+ def add_account(self, services, resource_types):
+ self._add_query(QueryStringConstants.SIGNED_SERVICES, services)
+ self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
+
+ def add_override_response_headers(self, cache_control,
+ content_disposition,
+ content_encoding,
+ content_language,
+ content_type):
+ self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
+ self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
+ self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
+ self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
+ self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
+
+ def add_account_signature(self, account_name, account_key):
+ def get_value_to_append(query):
+ return_value = self.query_dict.get(query) or ''
+ return return_value + '\n'
+
+ string_to_sign = \
+ (account_name + '\n' +
+ get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
+ get_value_to_append(QueryStringConstants.SIGNED_SERVICES) +
+ get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) +
+ get_value_to_append(QueryStringConstants.SIGNED_START) +
+ get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
+ get_value_to_append(QueryStringConstants.SIGNED_IP) +
+ get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
+ get_value_to_append(QueryStringConstants.SIGNED_VERSION))
+
+ self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
+ sign_string(account_key, string_to_sign))
+
+ def get_token(self):
+ return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None])
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/uploads.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/uploads.py
new file mode 100644
index 00000000000..1b619dfc3be
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/uploads.py
@@ -0,0 +1,602 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+from concurrent import futures
+from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
+from threading import Lock
+from itertools import islice
+from math import ceil
+
+import six
+
+from azure.core.tracing.common import with_current_context
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+from .encryption import get_blob_encryptor_and_padder
+
+
+_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
+_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object."
+
+
+def _parallel_uploads(executor, uploader, pending, running):
+ range_ids = []
+ while True:
+        # Wait for some upload to finish before adding a new one
+ done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
+ range_ids.extend([chunk.result() for chunk in done])
+ try:
+ for _ in range(0, len(done)):
+ next_chunk = next(pending)
+ running.add(executor.submit(with_current_context(uploader), next_chunk))
+ except StopIteration:
+ break
+
+ # Wait for the remaining uploads to finish
+ done, _running = futures.wait(running)
+ range_ids.extend([chunk.result() for chunk in done])
+ return range_ids
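+
+# Illustrative sketch (not part of the vendored SDK): the same sliding-window
+# submission pattern in isolation -- keep up to `window` futures in flight and
+# submit a new work item whenever one completes. `handle` and `work` are
+# placeholders.
+#
+#   from concurrent import futures as cf
+#   from itertools import islice
+#
+#   def run_windowed(handle, work, window=4):
+#       work = iter(work)
+#       results = []
+#       with cf.ThreadPoolExecutor(window) as executor:
+#           running = {executor.submit(handle, w) for w in islice(work, window)}
+#           while running:
+#               done, running = cf.wait(running, return_when=cf.FIRST_COMPLETED)
+#               results.extend(f.result() for f in done)
+#               for _ in done:
+#                   try:
+#                       running.add(executor.submit(handle, next(work)))
+#                   except StopIteration:
+#                       break
+#       return results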
+
+
+def upload_data_chunks(
+ service=None,
+ uploader_class=None,
+ total_size=None,
+ chunk_size=None,
+ max_concurrency=None,
+ stream=None,
+ validate_content=None,
+ encryption_options=None,
+ **kwargs):
+
+ if encryption_options:
+ encryptor, padder = get_blob_encryptor_and_padder(
+ encryption_options.get('cek'),
+ encryption_options.get('vector'),
+ uploader_class is not PageBlobChunkUploader)
+ kwargs['encryptor'] = encryptor
+ kwargs['padder'] = padder
+
+ parallel = max_concurrency > 1
+ if parallel and 'modified_access_conditions' in kwargs:
+ # Access conditions do not work with parallelism
+ kwargs['modified_access_conditions'] = None
+
+ uploader = uploader_class(
+ service=service,
+ total_size=total_size,
+ chunk_size=chunk_size,
+ stream=stream,
+ parallel=parallel,
+ validate_content=validate_content,
+ **kwargs)
+ if parallel:
+ with futures.ThreadPoolExecutor(max_concurrency) as executor:
+ upload_tasks = uploader.get_chunk_streams()
+ running_futures = [
+ executor.submit(with_current_context(uploader.process_chunk), u)
+ for u in islice(upload_tasks, 0, max_concurrency)
+ ]
+ range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
+ else:
+ range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
+ if any(range_ids):
+ return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+ return uploader.response_headers
+
+
+def upload_substream_blocks(
+ service=None,
+ uploader_class=None,
+ total_size=None,
+ chunk_size=None,
+ max_concurrency=None,
+ stream=None,
+ **kwargs):
+ parallel = max_concurrency > 1
+ if parallel and 'modified_access_conditions' in kwargs:
+ # Access conditions do not work with parallelism
+ kwargs['modified_access_conditions'] = None
+ uploader = uploader_class(
+ service=service,
+ total_size=total_size,
+ chunk_size=chunk_size,
+ stream=stream,
+ parallel=parallel,
+ **kwargs)
+
+ if parallel:
+ with futures.ThreadPoolExecutor(max_concurrency) as executor:
+ upload_tasks = uploader.get_substream_blocks()
+ running_futures = [
+ executor.submit(with_current_context(uploader.process_substream_block), u)
+ for u in islice(upload_tasks, 0, max_concurrency)
+ ]
+ range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures)
+ else:
+ range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()]
+ if any(range_ids):
+ return sorted(range_ids)
+ return []
+
+
+class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes
+
+ def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs):
+ self.service = service
+ self.total_size = total_size
+ self.chunk_size = chunk_size
+ self.stream = stream
+ self.parallel = parallel
+
+ # Stream management
+ self.stream_start = stream.tell() if parallel else None
+ self.stream_lock = Lock() if parallel else None
+
+ # Progress feedback
+ self.progress_total = 0
+ self.progress_lock = Lock() if parallel else None
+
+ # Encryption
+ self.encryptor = encryptor
+ self.padder = padder
+ self.response_headers = None
+ self.etag = None
+ self.last_modified = None
+ self.request_options = kwargs
+
+ def get_chunk_streams(self):
+ index = 0
+ while True:
+ data = b""
+ read_size = self.chunk_size
+
+ # Buffer until we either reach the end of the stream or get a whole chunk.
+ while True:
+ if self.total_size:
+ read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
+ temp = self.stream.read(read_size)
+ if not isinstance(temp, six.binary_type):
+ raise TypeError("Blob data should be of type bytes.")
+ data += temp or b""
+
+ # We have read an empty string and so are at the end
+ # of the buffer or we have read a full chunk.
+ if temp == b"" or len(data) == self.chunk_size:
+ break
+
+ if len(data) == self.chunk_size:
+ if self.padder:
+ data = self.padder.update(data)
+ if self.encryptor:
+ data = self.encryptor.update(data)
+ yield index, data
+ else:
+ if self.padder:
+ data = self.padder.update(data) + self.padder.finalize()
+ if self.encryptor:
+ data = self.encryptor.update(data) + self.encryptor.finalize()
+ if data:
+ yield index, data
+ break
+ index += len(data)
+
+ def process_chunk(self, chunk_data):
+ chunk_bytes = chunk_data[1]
+ chunk_offset = chunk_data[0]
+ return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
+
+ def _update_progress(self, length):
+ if self.progress_lock is not None:
+ with self.progress_lock:
+ self.progress_total += length
+ else:
+ self.progress_total += length
+
+ def _upload_chunk(self, chunk_offset, chunk_data):
+ raise NotImplementedError("Must be implemented by child class.")
+
+ def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
+ range_id = self._upload_chunk(chunk_offset, chunk_data)
+ self._update_progress(len(chunk_data))
+ return range_id
+
+ def get_substream_blocks(self):
+ assert self.chunk_size is not None
+ lock = self.stream_lock
+ blob_length = self.total_size
+
+ if blob_length is None:
+ blob_length = get_length(self.stream)
+ if blob_length is None:
+ raise ValueError("Unable to determine content length of upload data.")
+
+ blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
+ last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
+
+ for i in range(blocks):
+ index = i * self.chunk_size
+ length = last_block_size if i == blocks - 1 else self.chunk_size
+ yield index, SubStream(self.stream, index, length, lock)
+
+ def process_substream_block(self, block_data):
+ return self._upload_substream_block_with_progress(block_data[0], block_data[1])
+
+ def _upload_substream_block(self, index, block_stream):
+ raise NotImplementedError("Must be implemented by child class.")
+
+ def _upload_substream_block_with_progress(self, index, block_stream):
+ range_id = self._upload_substream_block(index, block_stream)
+ self._update_progress(len(block_stream))
+ return range_id
+
+ def set_response_properties(self, resp):
+ self.etag = resp.etag
+ self.last_modified = resp.last_modified
+
+
+class BlockBlobChunkUploader(_ChunkUploader):
+
+ def __init__(self, *args, **kwargs):
+ kwargs.pop("modified_access_conditions", None)
+ super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
+ self.current_length = None
+
+ def _upload_chunk(self, chunk_offset, chunk_data):
+ # TODO: This is incorrect, but works with recording.
+ index = '{0:032d}'.format(chunk_offset)
+ block_id = encode_base64(url_quote(encode_base64(index)))
+ self.service.stage_block(
+ block_id,
+ len(chunk_data),
+ chunk_data,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+ return index, block_id
+
+ def _upload_substream_block(self, index, block_stream):
+ try:
+ block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size))
+ self.service.stage_block(
+ block_id,
+ len(block_stream),
+ block_stream,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+ finally:
+ block_stream.close()
+ return block_id
+
+
+class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
+
+ def _is_chunk_empty(self, chunk_data):
+ # read until non-zero byte is encountered
+ # if reached the end without returning, then chunk_data is all 0's
+ return not any(bytearray(chunk_data))
+
+ def _upload_chunk(self, chunk_offset, chunk_data):
+ # avoid uploading the empty pages
+ if not self._is_chunk_empty(chunk_data):
+ chunk_end = chunk_offset + len(chunk_data) - 1
+ content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end)
+ computed_md5 = None
+ self.response_headers = self.service.upload_pages(
+ body=chunk_data,
+ content_length=len(chunk_data),
+ transactional_content_md5=computed_md5,
+ range=content_range,
+ cls=return_response_headers,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+
+ if not self.parallel and self.request_options.get('modified_access_conditions'):
+ self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+ def _upload_substream_block(self, index, block_stream):
+ pass
+
+
+class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
+
+ def __init__(self, *args, **kwargs):
+ super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
+ self.current_length = None
+
+ def _upload_chunk(self, chunk_offset, chunk_data):
+ if self.current_length is None:
+ self.response_headers = self.service.append_block(
+ body=chunk_data,
+ content_length=len(chunk_data),
+ cls=return_response_headers,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+ self.current_length = int(self.response_headers["blob_append_offset"])
+ else:
+ self.request_options['append_position_access_conditions'].append_position = \
+ self.current_length + chunk_offset
+ self.response_headers = self.service.append_block(
+ body=chunk_data,
+ content_length=len(chunk_data),
+ cls=return_response_headers,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+
+ def _upload_substream_block(self, index, block_stream):
+ pass
+
+
+class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
+
+ def _upload_chunk(self, chunk_offset, chunk_data):
+        # append this chunk at the given position in the file
+ self.response_headers = self.service.append_data(
+ body=chunk_data,
+ position=chunk_offset,
+ content_length=len(chunk_data),
+ cls=return_response_headers,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+
+ if not self.parallel and self.request_options.get('modified_access_conditions'):
+ self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+ def _upload_substream_block(self, index, block_stream):
+ try:
+ self.service.append_data(
+ body=block_stream,
+ position=index,
+ content_length=len(block_stream),
+ cls=return_response_headers,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+ finally:
+ block_stream.close()
+
+
+class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
+
+ def _upload_chunk(self, chunk_offset, chunk_data):
+ length = len(chunk_data)
+ chunk_end = chunk_offset + length - 1
+ response = self.service.upload_range(
+ chunk_data,
+ chunk_offset,
+ length,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+ return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response
+
+ # TODO: Implement this method.
+ def _upload_substream_block(self, index, block_stream):
+ pass
+
+
+class SubStream(IOBase):
+
+ def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
+ # Python 2.7: file-like objects created with open() typically support seek(), but are not
+ # derivations of io.IOBase and thus do not implement seekable().
+ # Python > 3.0: file-like objects created with open() are derived from io.IOBase.
+ try:
+            # only the main thread runs this, so there's no need to grab the lock
+ wrapped_stream.seek(0, SEEK_CUR)
+ except:
+ raise ValueError("Wrapped stream must support seek().")
+
+ self._lock = lockObj
+ self._wrapped_stream = wrapped_stream
+ self._position = 0
+ self._stream_begin_index = stream_begin_index
+ self._length = length
+ self._buffer = BytesIO()
+
+ # we must avoid buffering more than necessary, and also not use up too much memory
+ # so the max buffer size is capped at 4MB
+ self._max_buffer_size = (
+ length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
+ )
+ self._current_buffer_start = 0
+ self._current_buffer_size = 0
+ super(SubStream, self).__init__()
+
+ def __len__(self):
+ return self._length
+
+ def close(self):
+ if self._buffer:
+ self._buffer.close()
+ self._wrapped_stream = None
+ IOBase.close(self)
+
+ def fileno(self):
+ return self._wrapped_stream.fileno()
+
+ def flush(self):
+ pass
+
+ def read(self, size=None):
+ if self.closed: # pylint: disable=using-constant-test
+ raise ValueError("Stream is closed.")
+
+ if size is None:
+ size = self._length - self._position
+
+ # adjust if out of bounds
+ if size + self._position >= self._length:
+ size = self._length - self._position
+
+ # return fast
+ if size == 0 or self._buffer.closed:
+ return b""
+
+ # attempt first read from the read buffer and update position
+ read_buffer = self._buffer.read(size)
+ bytes_read = len(read_buffer)
+ bytes_remaining = size - bytes_read
+ self._position += bytes_read
+
+ # repopulate the read buffer from the underlying stream to fulfill the request
+ # ensure the seek and read operations are done atomically (only if a lock is provided)
+ if bytes_remaining > 0:
+ with self._buffer:
+ # either read in the max buffer size specified on the class
+ # or read in just enough data for the current block/sub stream
+ current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
+
+ # lock is only defined if max_concurrency > 1 (parallel uploads)
+ if self._lock:
+ with self._lock:
+ # reposition the underlying stream to match the start of the data to read
+ absolute_position = self._stream_begin_index + self._position
+ self._wrapped_stream.seek(absolute_position, SEEK_SET)
+ # If we can't seek to the right location, our read will be corrupted so fail fast.
+ if self._wrapped_stream.tell() != absolute_position:
+ raise IOError("Stream failed to seek to the desired location.")
+ buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+ else:
+ absolute_position = self._stream_begin_index + self._position
+ # It's possible that there's connection problem during data transfer,
+ # so when we retry we don't want to read from current position of wrapped stream,
+ # instead we should seek to where we want to read from.
+ if self._wrapped_stream.tell() != absolute_position:
+ self._wrapped_stream.seek(absolute_position, SEEK_SET)
+
+ buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+
+ if buffer_from_stream:
+ # update the buffer with new data from the wrapped stream
+ # we need to note down the start position and size of the buffer, in case seek is performed later
+ self._buffer = BytesIO(buffer_from_stream)
+ self._current_buffer_start = self._position
+ self._current_buffer_size = len(buffer_from_stream)
+
+ # read the remaining bytes from the new buffer and update position
+ second_read_buffer = self._buffer.read(bytes_remaining)
+ read_buffer += second_read_buffer
+ self._position += len(second_read_buffer)
+
+ return read_buffer
+
+ def readable(self):
+ return True
+
+ def readinto(self, b):
+ raise UnsupportedOperation
+
+ def seek(self, offset, whence=0):
+ if whence is SEEK_SET:
+ start_index = 0
+ elif whence is SEEK_CUR:
+ start_index = self._position
+ elif whence is SEEK_END:
+ start_index = self._length
+ offset = -offset
+ else:
+ raise ValueError("Invalid argument for the 'whence' parameter.")
+
+ pos = start_index + offset
+
+ if pos > self._length:
+ pos = self._length
+ elif pos < 0:
+ pos = 0
+
+ # check if buffer is still valid
+ # if not, drop buffer
+ if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
+ self._buffer.close()
+ self._buffer = BytesIO()
+ else: # if yes seek to correct position
+ delta = pos - self._current_buffer_start
+ self._buffer.seek(delta, SEEK_SET)
+
+ self._position = pos
+ return pos
+
+ def seekable(self):
+ return True
+
+ def tell(self):
+ return self._position
+
+ def write(self):
+ raise UnsupportedOperation
+
+ def writelines(self):
+ raise UnsupportedOperation
+
+    def writable(self):
+        return False
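+
+    # Illustrative sketch (not part of the vendored SDK): SubStream exposes a
+    # fixed-length window of a larger seekable stream, which lets parallel
+    # block uploads read disjoint ranges of a single file. Values below are
+    # placeholders.
+    #
+    #   source = BytesIO(b"0123456789abcdef")
+    #   window = SubStream(source, stream_begin_index=4, length=6, lockObj=None)
+    #   window.read()        # -> b"456789"
+    #   window.seek(0)
+    #   window.read(3)       # -> b"456"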
+
+
+class IterStreamer(object):
+ """
+ File-like streaming iterator.
+ """
+
+ def __init__(self, generator, encoding="UTF-8"):
+ self.generator = generator
+ self.iterator = iter(generator)
+ self.leftover = b""
+ self.encoding = encoding
+
+ def __len__(self):
+ return self.generator.__len__()
+
+ def __iter__(self):
+ return self.iterator
+
+ def seekable(self):
+ return False
+
+ def __next__(self):
+ return next(self.iterator)
+
+ next = __next__ # Python 2 compatibility.
+
+ def tell(self, *args, **kwargs):
+ raise UnsupportedOperation("Data generator does not support tell.")
+
+ def seek(self, *args, **kwargs):
+ raise UnsupportedOperation("Data generator is unseekable.")
+
+ def read(self, size):
+ data = self.leftover
+ count = len(self.leftover)
+ try:
+ while count < size:
+ chunk = self.__next__()
+ if isinstance(chunk, six.text_type):
+ chunk = chunk.encode(self.encoding)
+ data += chunk
+ count += len(chunk)
+ except StopIteration:
+ pass
+
+ if count > size:
+ self.leftover = data[size:]
+
+ return data[:size]
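+
+    # Illustrative sketch (not part of the vendored SDK): IterStreamer adapts a
+    # chunk iterator into a read()-able object so iterable payloads can feed the
+    # chunked uploaders. The chunks below are placeholders.
+    #
+    #   stream = IterStreamer(iter([b"hello ", b"world", b"!"]))
+    #   stream.read(8)       # -> b"hello wo"
+    #   stream.read(8)       # -> b"rld!"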
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/uploads_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/uploads_async.py
new file mode 100644
index 00000000000..5ed192b3659
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared/uploads_async.py
@@ -0,0 +1,395 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+import asyncio
+from asyncio import Lock
+from itertools import islice
+import threading
+
+from math import ceil
+
+import six
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+from .encryption import get_blob_encryptor_and_padder
+from .uploads import SubStream, IterStreamer # pylint: disable=unused-import
+
+
+_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
+_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
+
+
+async def _parallel_uploads(uploader, pending, running):
+ range_ids = []
+ while True:
+        # Wait for some upload to finish before adding a new one
+ done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
+ range_ids.extend([chunk.result() for chunk in done])
+ try:
+ for _ in range(0, len(done)):
+ next_chunk = next(pending)
+ running.add(asyncio.ensure_future(uploader(next_chunk)))
+ except StopIteration:
+ break
+
+ # Wait for the remaining uploads to finish
+ if running:
+ done, _running = await asyncio.wait(running)
+ range_ids.extend([chunk.result() for chunk in done])
+ return range_ids
+
+
+async def upload_data_chunks(
+ service=None,
+ uploader_class=None,
+ total_size=None,
+ chunk_size=None,
+ max_concurrency=None,
+ stream=None,
+ encryption_options=None,
+ **kwargs):
+
+ if encryption_options:
+ encryptor, padder = get_blob_encryptor_and_padder(
+ encryption_options.get('cek'),
+ encryption_options.get('vector'),
+ uploader_class is not PageBlobChunkUploader)
+ kwargs['encryptor'] = encryptor
+ kwargs['padder'] = padder
+
+ parallel = max_concurrency > 1
+ if parallel and 'modified_access_conditions' in kwargs:
+ # Access conditions do not work with parallelism
+ kwargs['modified_access_conditions'] = None
+
+ uploader = uploader_class(
+ service=service,
+ total_size=total_size,
+ chunk_size=chunk_size,
+ stream=stream,
+ parallel=parallel,
+ **kwargs)
+
+ if parallel:
+ upload_tasks = uploader.get_chunk_streams()
+ running_futures = [
+ asyncio.ensure_future(uploader.process_chunk(u))
+ for u in islice(upload_tasks, 0, max_concurrency)
+ ]
+ range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures)
+ else:
+ range_ids = []
+ for chunk in uploader.get_chunk_streams():
+ range_ids.append(await uploader.process_chunk(chunk))
+
+ if any(range_ids):
+ return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+ return uploader.response_headers
+
+
+async def upload_substream_blocks(
+ service=None,
+ uploader_class=None,
+ total_size=None,
+ chunk_size=None,
+ max_concurrency=None,
+ stream=None,
+ **kwargs):
+ parallel = max_concurrency > 1
+ if parallel and 'modified_access_conditions' in kwargs:
+ # Access conditions do not work with parallelism
+ kwargs['modified_access_conditions'] = None
+ uploader = uploader_class(
+ service=service,
+ total_size=total_size,
+ chunk_size=chunk_size,
+ stream=stream,
+ parallel=parallel,
+ **kwargs)
+
+ if parallel:
+ upload_tasks = uploader.get_substream_blocks()
+ running_futures = [
+ asyncio.ensure_future(uploader.process_substream_block(u))
+ for u in islice(upload_tasks, 0, max_concurrency)
+ ]
+ range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures)
+ else:
+ range_ids = []
+ for block in uploader.get_substream_blocks():
+ range_ids.append(await uploader.process_substream_block(block))
+ if any(range_ids):
+ return sorted(range_ids)
+    return []
+
+
+class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes
+
+ def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs):
+ self.service = service
+ self.total_size = total_size
+ self.chunk_size = chunk_size
+ self.stream = stream
+ self.parallel = parallel
+
+ # Stream management
+ self.stream_start = stream.tell() if parallel else None
+ self.stream_lock = threading.Lock() if parallel else None
+
+ # Progress feedback
+ self.progress_total = 0
+ self.progress_lock = Lock() if parallel else None
+
+ # Encryption
+ self.encryptor = encryptor
+ self.padder = padder
+ self.response_headers = None
+ self.etag = None
+ self.last_modified = None
+ self.request_options = kwargs
+
+ def get_chunk_streams(self):
+ index = 0
+ while True:
+ data = b''
+ read_size = self.chunk_size
+
+ # Buffer until we either reach the end of the stream or get a whole chunk.
+ while True:
+ if self.total_size:
+ read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
+ temp = self.stream.read(read_size)
+ if not isinstance(temp, six.binary_type):
+ raise TypeError('Blob data should be of type bytes.')
+ data += temp or b""
+
+ # We have read an empty string and so are at the end
+ # of the buffer or we have read a full chunk.
+ if temp == b'' or len(data) == self.chunk_size:
+ break
+
+ if len(data) == self.chunk_size:
+ if self.padder:
+ data = self.padder.update(data)
+ if self.encryptor:
+ data = self.encryptor.update(data)
+ yield index, data
+ else:
+ if self.padder:
+ data = self.padder.update(data) + self.padder.finalize()
+ if self.encryptor:
+ data = self.encryptor.update(data) + self.encryptor.finalize()
+ if data:
+ yield index, data
+ break
+ index += len(data)
+
+ async def process_chunk(self, chunk_data):
+ chunk_bytes = chunk_data[1]
+ chunk_offset = chunk_data[0]
+ return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
+
+ async def _update_progress(self, length):
+ if self.progress_lock is not None:
+ async with self.progress_lock:
+ self.progress_total += length
+ else:
+ self.progress_total += length
+
+ async def _upload_chunk(self, chunk_offset, chunk_data):
+ raise NotImplementedError("Must be implemented by child class.")
+
+ async def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
+ range_id = await self._upload_chunk(chunk_offset, chunk_data)
+ await self._update_progress(len(chunk_data))
+ return range_id
+
+ def get_substream_blocks(self):
+ assert self.chunk_size is not None
+ lock = self.stream_lock
+ blob_length = self.total_size
+
+ if blob_length is None:
+ blob_length = get_length(self.stream)
+ if blob_length is None:
+ raise ValueError("Unable to determine content length of upload data.")
+
+ blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
+ last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
+
+ for i in range(blocks):
+ index = i * self.chunk_size
+ length = last_block_size if i == blocks - 1 else self.chunk_size
+ yield index, SubStream(self.stream, index, length, lock)
+
+ async def process_substream_block(self, block_data):
+ return await self._upload_substream_block_with_progress(block_data[0], block_data[1])
+
+ async def _upload_substream_block(self, index, block_stream):
+ raise NotImplementedError("Must be implemented by child class.")
+
+ async def _upload_substream_block_with_progress(self, index, block_stream):
+ range_id = await self._upload_substream_block(index, block_stream)
+ await self._update_progress(len(block_stream))
+ return range_id
+
+ def set_response_properties(self, resp):
+ self.etag = resp.etag
+ self.last_modified = resp.last_modified
+
+
+class BlockBlobChunkUploader(_ChunkUploader):
+
+ def __init__(self, *args, **kwargs):
+ kwargs.pop('modified_access_conditions', None)
+ super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
+ self.current_length = None
+
+ async def _upload_chunk(self, chunk_offset, chunk_data):
+ # TODO: This is incorrect, but works with recording.
+ index = '{0:032d}'.format(chunk_offset)
+ block_id = encode_base64(url_quote(encode_base64(index)))
+ await self.service.stage_block(
+ block_id,
+ len(chunk_data),
+ body=chunk_data,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options)
+ return index, block_id
+
+ async def _upload_substream_block(self, index, block_stream):
+ try:
+ block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size))
+ await self.service.stage_block(
+ block_id,
+ len(block_stream),
+ block_stream,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options)
+ finally:
+ block_stream.close()
+ return block_id
+
+
+class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
+
+ def _is_chunk_empty(self, chunk_data):
+ # read until non-zero byte is encountered
+ # if reached the end without returning, then chunk_data is all 0's
+ for each_byte in chunk_data:
+ if each_byte not in [0, b'\x00']:
+ return False
+ return True
+
+ async def _upload_chunk(self, chunk_offset, chunk_data):
+ # avoid uploading the empty pages
+ if not self._is_chunk_empty(chunk_data):
+ chunk_end = chunk_offset + len(chunk_data) - 1
+ content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end)
+ computed_md5 = None
+ self.response_headers = await self.service.upload_pages(
+ body=chunk_data,
+ content_length=len(chunk_data),
+ transactional_content_md5=computed_md5,
+ range=content_range,
+ cls=return_response_headers,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options)
+
+ if not self.parallel and self.request_options.get('modified_access_conditions'):
+ self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+ async def _upload_substream_block(self, index, block_stream):
+ pass
+
+
+class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
+
+ def __init__(self, *args, **kwargs):
+ super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
+ self.current_length = None
+
+ async def _upload_chunk(self, chunk_offset, chunk_data):
+ if self.current_length is None:
+ self.response_headers = await self.service.append_block(
+ body=chunk_data,
+ content_length=len(chunk_data),
+ cls=return_response_headers,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options)
+ self.current_length = int(self.response_headers['blob_append_offset'])
+ else:
+ self.request_options['append_position_access_conditions'].append_position = \
+ self.current_length + chunk_offset
+ self.response_headers = await self.service.append_block(
+ body=chunk_data,
+ content_length=len(chunk_data),
+ cls=return_response_headers,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options)
+
+ async def _upload_substream_block(self, index, block_stream):
+ pass
+
+
+class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
+
+ async def _upload_chunk(self, chunk_offset, chunk_data):
+ self.response_headers = await self.service.append_data(
+ body=chunk_data,
+ position=chunk_offset,
+ content_length=len(chunk_data),
+ cls=return_response_headers,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+
+ if not self.parallel and self.request_options.get('modified_access_conditions'):
+ self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+ async def _upload_substream_block(self, index, block_stream):
+ try:
+ await self.service.append_data(
+ body=block_stream,
+ position=index,
+ content_length=len(block_stream),
+ cls=return_response_headers,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+ finally:
+ block_stream.close()
+
+
+class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
+
+ async def _upload_chunk(self, chunk_offset, chunk_data):
+ length = len(chunk_data)
+ chunk_end = chunk_offset + length - 1
+ response = await self.service.upload_range(
+ chunk_data,
+ chunk_offset,
+ length,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+ range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end)
+ return range_id, response
+
+ # TODO: Implement this method.
+ async def _upload_substream_block(self, index, block_stream):
+ pass
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared_access_signature.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared_access_signature.py
new file mode 100644
index 00000000000..4c23b05277f
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_shared_access_signature.py
@@ -0,0 +1,391 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from typing import TYPE_CHECKING
+
+from ...blob import generate_account_sas as generate_blob_account_sas
+from ...blob import generate_container_sas, generate_blob_sas
+if TYPE_CHECKING:
+ import datetime
+ from ._models import AccountSasPermissions, FileSystemSasPermissions, FileSasPermissions, ResourceTypes, \
+ UserDelegationKey
+
+
+def generate_account_sas(
+ account_name, # type: str
+ account_key, # type: str
+ resource_types, # type: Union[ResourceTypes, str]
+ permission, # type: Union[AccountSasPermissions, str]
+ expiry, # type: Optional[Union[datetime, str]]
+ **kwargs # type: Any
+ ): # type: (...) -> str
+ """Generates a shared access signature for the DataLake service.
+
+ Use the returned signature as the credential parameter of any DataLakeServiceClient,
+ FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+ :param str account_name:
+ The storage account name used to generate the shared access signature.
+ :param str account_key:
+ The access key to generate the shared access signature.
+ :param resource_types:
+ Specifies the resource types that are accessible with the account SAS.
+ :type resource_types: str or ~azure.storage.filedatalake.ResourceTypes
+ :param permission:
+ The permissions associated with the shared access signature. The
+ user is restricted to operations allowed by the permissions.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has been
+ specified in an associated stored access policy.
+ :type permission: str or ~azure.storage.filedatalake.AccountSasPermissions
+ :param expiry:
+ The time at which the shared access signature becomes invalid.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has
+ been specified in an associated stored access policy. Azure will always
+ convert values to UTC. If a date is passed in without timezone info, it
+ is assumed to be UTC.
+ :type expiry: ~datetime.datetime or str
+ :keyword start:
+ The time at which the shared access signature becomes valid. If
+ omitted, start time for this call is assumed to be the time when the
+ storage service receives the request. Azure will always convert values
+ to UTC. If a date is passed in without timezone info, it is assumed to
+ be UTC.
+ :paramtype start: ~datetime.datetime or str
+ :keyword str ip:
+ Specifies an IP address or a range of IP addresses from which to accept requests.
+ If the IP address from which the request originates does not match the IP address
+ or address range specified on the SAS token, the request is not authenticated.
+ For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+ restricts the request to those IP addresses.
+ :keyword str protocol:
+ Specifies the protocol permitted for a request made. The default value is https.
+ :return: A Shared Access Signature (sas) token.
+ :rtype: str
+ """
+ return generate_blob_account_sas(
+ account_name=account_name,
+ account_key=account_key,
+ resource_types=resource_types,
+ permission=permission,
+ expiry=expiry,
+ **kwargs
+ )
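+
+# Illustrative sketch (not part of the vendored SDK): a typical call using the
+# string shorthand for resource types and permissions; the account name, key
+# and expiry below are placeholders.
+#
+#   from datetime import datetime, timedelta
+#
+#   sas_token = generate_account_sas(
+#       account_name="myadls",
+#       account_key="bXlhZGxzLWtleQ==",
+#       resource_types="sco",                 # service, container, object
+#       permission="rl",                      # read + list
+#       expiry=datetime.utcnow() + timedelta(hours=1))
+#   # sas_token can then be passed as the credential of a DataLakeServiceClient.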
+
+
+def generate_file_system_sas(
+ account_name, # type: str
+ file_system_name, # type: str
+ credential, # type: Union[str, UserDelegationKey]
+ permission=None, # type: Optional[Union[FileSystemSasPermissions, str]]
+ expiry=None, # type: Optional[Union[datetime, str]]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> str
+ """Generates a shared access signature for a file system.
+
+ Use the returned signature with the credential parameter of any DataLakeServiceClient,
+ FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+ :param str account_name:
+ The storage account name used to generate the shared access signature.
+ :param str file_system_name:
+ The name of the file system.
+ :param str credential:
+        The credential could be either an account key or a user delegation key.
+        If an account key is used as the credential, then the credential type should be a str.
+ Instead of an account key, the user could also pass in a user delegation key.
+ A user delegation key can be obtained from the service by authenticating with an AAD identity;
+ this can be accomplished
+ by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
+ When present, the SAS is signed with the user delegation key instead.
+ :type credential: str or ~azure.storage.filedatalake.UserDelegationKey
+ :param permission:
+ The permissions associated with the shared access signature. The
+ user is restricted to operations allowed by the permissions.
+ Permissions must be ordered read, write, delete, list.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has been
+ specified in an associated stored access policy.
+ :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions
+ :param expiry:
+ The time at which the shared access signature becomes invalid.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has
+ been specified in an associated stored access policy. Azure will always
+ convert values to UTC. If a date is passed in without timezone info, it
+ is assumed to be UTC.
+ :type expiry: datetime or str
+ :keyword start:
+ The time at which the shared access signature becomes valid. If
+ omitted, start time for this call is assumed to be the time when the
+ storage service receives the request. Azure will always convert values
+ to UTC. If a date is passed in without timezone info, it is assumed to
+ be UTC.
+ :paramtype start: datetime or str
+ :keyword str ip:
+ Specifies an IP address or a range of IP addresses from which to accept requests.
+ If the IP address from which the request originates does not match the IP address
+ or address range specified on the SAS token, the request is not authenticated.
+ For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+ restricts the request to those IP addresses.
+ :keyword str protocol:
+ Specifies the protocol permitted for a request made. The default value is https.
+ :keyword str cache_control:
+ Response header value for Cache-Control when resource is accessed
+ using this shared access signature.
+ :keyword str content_disposition:
+ Response header value for Content-Disposition when resource is accessed
+ using this shared access signature.
+ :keyword str content_encoding:
+ Response header value for Content-Encoding when resource is accessed
+ using this shared access signature.
+ :keyword str content_language:
+ Response header value for Content-Language when resource is accessed
+ using this shared access signature.
+ :keyword str content_type:
+ Response header value for Content-Type when resource is accessed
+ using this shared access signature.
+ :keyword str preauthorized_agent_object_id:
+ The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform
+ the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the
+ user delegation key has the required permissions before granting access but no additional permission check for
+ the agent object id will be performed.
+ :keyword str agent_object_id:
+ The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to
+ perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner
+ of the user delegation key has the required permissions before granting access and the service will perform an
+ additional POSIX ACL check to determine if this user is authorized to perform the requested operation.
+ :keyword str correlation_id:
+ The correlation id to correlate the storage audit logs with the audit logs used by the principal
+ generating and distributing the SAS.
+ :return: A Shared Access Signature (sas) token.
+ :rtype: str
+ """
+ return generate_container_sas(
+ account_name=account_name,
+ container_name=file_system_name,
+ account_key=credential if isinstance(credential, str) else None,
+ user_delegation_key=credential if not isinstance(credential, str) else None,
+ permission=permission,
+ expiry=expiry,
+ **kwargs)
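+
+# Illustrative sketch (not part of the vendored SDK): a file-system-level SAS
+# signed with the account key; the names, key and expiry are placeholders.
+#
+#   fs_sas = generate_file_system_sas(
+#       account_name="myadls",
+#       file_system_name="myfilesystem",
+#       credential="bXlhZGxzLWtleQ==",        # account key as str
+#       permission="rl",
+#       expiry="2021-12-31T00:00:00Z")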
+
+
+def generate_directory_sas(
+ account_name, # type: str
+ file_system_name, # type: str
+ directory_name, # type: str
+ credential, # type: Union[str, UserDelegationKey]
+ permission=None, # type: Optional[Union[FileSasPermissions, str]]
+ expiry=None, # type: Optional[Union[datetime, str]]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> str
+ """Generates a shared access signature for a directory.
+
+ Use the returned signature with the credential parameter of any DataLakeServiceClient,
+ FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+ :param str account_name:
+ The storage account name used to generate the shared access signature.
+ :param str file_system_name:
+ The name of the file system.
+ :param str directory_name:
+ The name of the directory.
+ :param str credential:
+        The credential could be either an account key or a user delegation key.
+        If an account key is used as the credential, then the credential type should be a str.
+ Instead of an account key, the user could also pass in a user delegation key.
+ A user delegation key can be obtained from the service by authenticating with an AAD identity;
+ this can be accomplished
+ by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
+ When present, the SAS is signed with the user delegation key instead.
+ :type credential: str or ~azure.storage.filedatalake.UserDelegationKey
+ :param permission:
+ The permissions associated with the shared access signature. The
+ user is restricted to operations allowed by the permissions.
+ Permissions must be ordered read, write, delete, list.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has been
+ specified in an associated stored access policy.
+ :type permission: str or ~azure.storage.filedatalake.FileSasPermissions
+ :param expiry:
+ The time at which the shared access signature becomes invalid.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has
+ been specified in an associated stored access policy. Azure will always
+ convert values to UTC. If a date is passed in without timezone info, it
+ is assumed to be UTC.
+ :type expiry: ~datetime.datetime or str
+ :keyword start:
+ The time at which the shared access signature becomes valid. If
+ omitted, start time for this call is assumed to be the time when the
+ storage service receives the request. Azure will always convert values
+ to UTC. If a date is passed in without timezone info, it is assumed to
+ be UTC.
+ :paramtype start: ~datetime.datetime or str
+ :keyword str ip:
+ Specifies an IP address or a range of IP addresses from which to accept requests.
+ If the IP address from which the request originates does not match the IP address
+ or address range specified on the SAS token, the request is not authenticated.
+ For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+ restricts the request to those IP addresses.
+ :keyword str protocol:
+ Specifies the protocol permitted for a request made. The default value is https.
+ :keyword str cache_control:
+ Response header value for Cache-Control when resource is accessed
+ using this shared access signature.
+ :keyword str content_disposition:
+ Response header value for Content-Disposition when resource is accessed
+ using this shared access signature.
+ :keyword str content_encoding:
+ Response header value for Content-Encoding when resource is accessed
+ using this shared access signature.
+ :keyword str content_language:
+ Response header value for Content-Language when resource is accessed
+ using this shared access signature.
+ :keyword str content_type:
+ Response header value for Content-Type when resource is accessed
+ using this shared access signature.
+ :keyword str preauthorized_agent_object_id:
+ The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform
+ the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the
+ user delegation key has the required permissions before granting access but no additional permission check for
+ the agent object id will be performed.
+ :keyword str agent_object_id:
+ The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to
+ perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner
+ of the user delegation key has the required permissions before granting access and the service will perform an
+ additional POSIX ACL check to determine if this user is authorized to perform the requested operation.
+ :keyword str correlation_id:
+ The correlation id to correlate the storage audit logs with the audit logs used by the principal
+ generating and distributing the SAS.
+ :return: A Shared Access Signature (sas) token.
+ :rtype: str
+ """
+ depth = len(directory_name.strip("/").split("/"))
+ return generate_blob_sas(
+ account_name=account_name,
+ container_name=file_system_name,
+ blob_name=directory_name,
+ account_key=credential if isinstance(credential, str) else None,
+ user_delegation_key=credential if not isinstance(credential, str) else None,
+ permission=permission,
+ expiry=expiry,
+ sdd=depth,
+ is_directory=True,
+ **kwargs)
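+
+# Illustrative sketch (not part of the vendored SDK): a directory-scoped SAS;
+# the signed directory depth (sdd) is derived from the directory path. The
+# names, expiry and delegation key below are placeholders.
+#
+#   dir_sas = generate_directory_sas(
+#       account_name="myadls",
+#       file_system_name="myfilesystem",
+#       directory_name="raw/2021/05",         # depth 3 -> sdd=3
+#       credential=delegation_key,            # e.g. from get_user_delegation_key()
+#       permission="rwdl",
+#       expiry="2021-12-31T00:00:00Z")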
+
+
+def generate_file_sas(
+ account_name, # type: str
+ file_system_name, # type: str
+ directory_name, # type: str
+ file_name, # type: str
+ credential, # type: Union[str, UserDelegationKey]
+ permission=None, # type: Optional[Union[FileSasPermissions, str]]
+ expiry=None, # type: Optional[Union[datetime, str]]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> str
+ """Generates a shared access signature for a file.
+
+    Use the returned signature with the credential parameter of any DataLakeServiceClient,
+ FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+ :param str account_name:
+ The storage account name used to generate the shared access signature.
+ :param str file_system_name:
+ The name of the file system.
+ :param str directory_name:
+ The name of the directory.
+ :param str file_name:
+ The name of the file.
+ :param str credential:
+        The credential could be either an account key or a user delegation key.
+        If an account key is used as the credential, then the credential type should be a str.
+ Instead of an account key, the user could also pass in a user delegation key.
+ A user delegation key can be obtained from the service by authenticating with an AAD identity;
+ this can be accomplished
+ by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
+ When present, the SAS is signed with the user delegation key instead.
+ :type credential: str or ~azure.storage.filedatalake.UserDelegationKey
+ :param permission:
+ The permissions associated with the shared access signature. The
+ user is restricted to operations allowed by the permissions.
+ Permissions must be ordered read, write, delete, list.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has been
+ specified in an associated stored access policy.
+ :type permission: str or ~azure.storage.filedatalake.FileSasPermissions
+ :param expiry:
+ The time at which the shared access signature becomes invalid.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has
+ been specified in an associated stored access policy. Azure will always
+ convert values to UTC. If a date is passed in without timezone info, it
+ is assumed to be UTC.
+ :type expiry: ~datetime.datetime or str
+ :keyword start:
+ The time at which the shared access signature becomes valid. If
+ omitted, start time for this call is assumed to be the time when the
+ storage service receives the request. Azure will always convert values
+ to UTC. If a date is passed in without timezone info, it is assumed to
+ be UTC.
+ :paramtype start: ~datetime.datetime or str
+ :keyword str ip:
+ Specifies an IP address or a range of IP addresses from which to accept requests.
+ If the IP address from which the request originates does not match the IP address
+ or address range specified on the SAS token, the request is not authenticated.
+ For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+ restricts the request to those IP addresses.
+ :keyword str protocol:
+ Specifies the protocol permitted for a request made. The default value is https.
+ :keyword str cache_control:
+ Response header value for Cache-Control when resource is accessed
+ using this shared access signature.
+ :keyword str content_disposition:
+ Response header value for Content-Disposition when resource is accessed
+ using this shared access signature.
+ :keyword str content_encoding:
+ Response header value for Content-Encoding when resource is accessed
+ using this shared access signature.
+ :keyword str content_language:
+ Response header value for Content-Language when resource is accessed
+ using this shared access signature.
+ :keyword str content_type:
+ Response header value for Content-Type when resource is accessed
+ using this shared access signature.
+ :keyword str preauthorized_agent_object_id:
+ The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform
+ the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the
+ user delegation key has the required permissions before granting access but no additional permission check for
+ the agent object id will be performed.
+ :keyword str agent_object_id:
+ The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to
+ perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner
+ of the user delegation key has the required permissions before granting access and the service will perform an
+ additional POSIX ACL check to determine if this user is authorized to perform the requested operation.
+ :keyword str correlation_id:
+ The correlation id to correlate the storage audit logs with the audit logs used by the principal
+ generating and distributing the SAS. This can only be used when generating a SAS with a user delegation key.
+ :return: A Shared Access Signature (sas) token.
+ :rtype: str
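+
+ A minimal usage sketch (not from the original samples): the account, file
+ system, directory and file names below are placeholders, and ``account_key``
+ is assumed to hold the storage account's shared key.
+
+ .. code-block:: python
+
+     from datetime import datetime, timedelta
+
+     sas_token = generate_file_sas(
+         account_name="myadls",
+         file_system_name="myfilesystem",
+         directory_name="mydir",
+         file_name="test.txt",
+         credential=account_key,
+         permission="r",
+         expiry=datetime.utcnow() + timedelta(hours=1))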
+ """
+ if directory_name:
+ path = directory_name.rstrip('/') + "/" + file_name
+ else:
+ path = file_name
+ return generate_blob_sas(
+ account_name=account_name,
+ container_name=file_system_name,
+ blob_name=path,
+ account_key=credential if isinstance(credential, str) else None,
+ user_delegation_key=credential if not isinstance(credential, str) else None,
+ permission=permission,
+ expiry=expiry,
+ **kwargs)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_upload_helper.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_upload_helper.py
new file mode 100644
index 00000000000..6d88c32a444
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_upload_helper.py
@@ -0,0 +1,104 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+from ._deserialize import (
+ process_storage_error)
+from ._shared.response_handlers import return_response_headers
+from ._shared.uploads import (
+ upload_data_chunks,
+ DataLakeFileChunkUploader, upload_substream_blocks)
+from azure.core.exceptions import HttpResponseError
+
+
+def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument
+ return any([
+ modified_access_conditions.if_modified_since,
+ modified_access_conditions.if_unmodified_since,
+ modified_access_conditions.if_none_match,
+ modified_access_conditions.if_match
+ ])
+
+
+def upload_datalake_file( # pylint: disable=unused-argument
+ client=None,
+ stream=None,
+ length=None,
+ overwrite=None,
+ validate_content=None,
+ max_concurrency=None,
+ file_settings=None,
+ **kwargs):
+ try:
+ if length == 0:
+ return {}
+ properties = kwargs.pop('properties', None)
+ umask = kwargs.pop('umask', None)
+ permissions = kwargs.pop('permissions', None)
+ path_http_headers = kwargs.pop('path_http_headers', None)
+ modified_access_conditions = kwargs.pop('modified_access_conditions', None)
+ chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024)
+
+ if not overwrite:
+ # if the caller didn't specify access conditions, they cannot flush data to an existing file
+ if not _any_conditions(modified_access_conditions):
+ modified_access_conditions.if_none_match = '*'
+ if properties or umask or permissions:
+ raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled")
+
+ if overwrite:
+ response = client.create(
+ resource='file',
+ path_http_headers=path_http_headers,
+ properties=properties,
+ modified_access_conditions=modified_access_conditions,
+ umask=umask,
+ permissions=permissions,
+ cls=return_response_headers,
+ **kwargs)
+
+ # this modified_access_conditions will be applied to flush_data to make sure
+ # no other flush between create and the current flush
+ modified_access_conditions.if_match = response['etag']
+ modified_access_conditions.if_none_match = None
+ modified_access_conditions.if_modified_since = None
+ modified_access_conditions.if_unmodified_since = None
+
+ use_original_upload_path = file_settings.use_byte_buffer or \
+ validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \
+ hasattr(stream, 'seekable') and not stream.seekable() or \
+ not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
+
+ if use_original_upload_path:
+ upload_data_chunks(
+ service=client,
+ uploader_class=DataLakeFileChunkUploader,
+ total_size=length,
+ chunk_size=chunk_size,
+ stream=stream,
+ max_concurrency=max_concurrency,
+ validate_content=validate_content,
+ **kwargs)
+ else:
+ upload_substream_blocks(
+ service=client,
+ uploader_class=DataLakeFileChunkUploader,
+ total_size=length,
+ chunk_size=chunk_size,
+ max_concurrency=max_concurrency,
+ stream=stream,
+ validate_content=validate_content,
+ **kwargs
+ )
+
+ return client.flush_data(position=length,
+ path_http_headers=path_http_headers,
+ modified_access_conditions=modified_access_conditions,
+ close=True,
+ cls=return_response_headers,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
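+
+
+# A small, self-contained sketch (not part of the vendored SDK) of the
+# overwrite guard used in upload_datalake_file above: when the caller supplies
+# no access conditions and overwrite is False, if_none_match is forced to '*'
+# so that create() only succeeds when the path does not already exist.
+# types.SimpleNamespace stands in for the generated ModifiedAccessConditions
+# model purely for illustration.
+def _demo_overwrite_guard():
+    from types import SimpleNamespace
+
+    conditions = SimpleNamespace(
+        if_modified_since=None, if_unmodified_since=None,
+        if_none_match=None, if_match=None)
+    if not _any_conditions(conditions):
+        conditions.if_none_match = '*'  # create() fails if the path already exists
+    return conditions.if_none_match  # '*'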
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_version.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_version.py
new file mode 100644
index 00000000000..85a0126d5fa
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/_version.py
@@ -0,0 +1,7 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+VERSION = "12.4.0b1"
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/__init__.py
new file mode 100644
index 00000000000..c24dde8d347
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/__init__.py
@@ -0,0 +1,24 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from ._download_async import StorageStreamDownloader
+from .._shared.policies_async import ExponentialRetry, LinearRetry
+from ._data_lake_file_client_async import DataLakeFileClient
+from ._data_lake_directory_client_async import DataLakeDirectoryClient
+from ._file_system_client_async import FileSystemClient
+from ._data_lake_service_client_async import DataLakeServiceClient
+from ._data_lake_lease_async import DataLakeLeaseClient
+
+__all__ = [
+ 'DataLakeServiceClient',
+ 'FileSystemClient',
+ 'DataLakeDirectoryClient',
+ 'DataLakeFileClient',
+ 'DataLakeLeaseClient',
+ 'ExponentialRetry',
+ 'LinearRetry',
+ 'StorageStreamDownloader'
+]
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_data_lake_directory_client_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_data_lake_directory_client_async.py
new file mode 100644
index 00000000000..7d0adefd268
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_data_lake_directory_client_async.py
@@ -0,0 +1,551 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+from typing import (  # pylint: disable=unused-import
+ Any, Dict, Optional, Union)
+
+try:
+ from urllib.parse import quote, unquote
+except ImportError:
+ from urllib2 import quote, unquote # type: ignore
+from azure.core.pipeline import AsyncPipeline
+from ._data_lake_file_client_async import DataLakeFileClient
+from .._data_lake_directory_client import DataLakeDirectoryClient as DataLakeDirectoryClientBase
+from .._models import DirectoryProperties, FileProperties
+from .._deserialize import deserialize_dir_properties
+from ._path_client_async import PathClient
+from .._shared.base_client_async import AsyncTransportWrapper
+
+
+class DataLakeDirectoryClient(PathClient, DataLakeDirectoryClientBase):
+ """A client to interact with the DataLake directory, even if the directory may not yet exist.
+
+ For operations relating to a specific subdirectory or file under the directory, a directory client or file client
+ can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions.
+
+ :ivar str url:
+ The full endpoint URL to the file system, including SAS token if used.
+ :ivar str primary_endpoint:
+ The full primary endpoint URL.
+ :ivar str primary_hostname:
+ The hostname of the primary endpoint.
+ :param str account_url:
+ The URI to the storage account.
+ :param file_system_name:
+ The file system for the directory or files.
+ :type file_system_name: str
+ :param directory_name:
+ The whole path of the directory, e.g. {directory under file system}/{directory to interact with}
+ :type directory_name: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string,
+ an instance of an AzureSasCredential from azure.core.credentials, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+ - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py
+ :start-after: [START instantiate_directory_client_from_conn_str]
+ :end-before: [END instantiate_directory_client_from_conn_str]
+ :language: python
+ :dedent: 4
+ :caption: Creating the DataLakeDirectoryClient from connection string.
+ """
+
+ def __init__(
+ self, account_url, # type: str
+ file_system_name, # type: str
+ directory_name, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, directory_name, # pylint: disable=specify-parameter-names-in-call
+ credential=credential, **kwargs)
+
+ async def create_directory(self, metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """
+ Create a new directory.
+
+ :param metadata:
+ Name-value pairs associated with the directory as metadata.
+ :type metadata: dict(str, str)
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword lease:
+ Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: response dict (Etag and last modified).
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_directory_async.py
+ :start-after: [START create_directory]
+ :end-before: [END create_directory]
+ :language: python
+ :dedent: 8
+ :caption: Create directory.
+ """
+ return await self._create('directory', metadata=metadata, **kwargs)
+
+ async def exists(self, **kwargs):
+ # type: (**Any) -> bool
+ """
+ Returns True if a directory exists and returns False otherwise.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: boolean
+ """
+ return await self._exists(**kwargs)
+
+ async def delete_directory(self, **kwargs):
+ # type: (...) -> None
+ """
+ Marks the specified directory for deletion.
+
+ :keyword lease:
+ Required if the directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_directory_async.py
+ :start-after: [START delete_directory]
+ :end-before: [END delete_directory]
+ :language: python
+ :dedent: 4
+ :caption: Delete directory.
+ """
+ return await self._delete(recursive=True, **kwargs)
+
+ async def get_directory_properties(self, **kwargs):
+ # type: (**Any) -> DirectoryProperties
+ """Returns all user-defined metadata, standard HTTP properties, and
+ system properties for the directory. It does not return the content of the directory.
+
+ :keyword lease:
+ Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: DirectoryProperties
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_directory_async.py
+ :start-after: [START get_directory_properties]
+ :end-before: [END get_directory_properties]
+ :language: python
+ :dedent: 4
+ :caption: Getting the properties for a file/directory.
+ """
+ return await self._get_path_properties(cls=deserialize_dir_properties, **kwargs) # pylint: disable=protected-access
+
+ async def rename_directory(self, new_name, # type: str
+ **kwargs):
+ # type: (**Any) -> DataLakeDirectoryClient
+ """
+ Rename the source directory.
+
+ :param str new_name:
+ The new directory name to rename to.
+ The value must have the following format: "{filesystem}/{directory}/{subdirectory}".
+ :keyword source_lease:
+ A lease ID for the source path. If specified,
+ the source path must have an active lease and the lease ID must
+ match.
+ :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeDirectoryClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_directory_async.py
+ :start-after: [START rename_directory]
+ :end-before: [END rename_directory]
+ :language: python
+ :dedent: 4
+ :caption: Rename the source directory.
+ """
+ new_name = new_name.strip('/')
+ new_file_system = new_name.split('/')[0]
+ new_path_and_token = new_name[len(new_file_system):].strip('/').split('?')
+ new_path = new_path_and_token[0]
+ try:
+ new_dir_sas = new_path_and_token[1] or self._query_str.strip('?')
+ except IndexError:
+ if not self._raw_credential and new_file_system != self.file_system_name:
+ raise ValueError("please provide the sas token for the new directory")
+ if not self._raw_credential and new_file_system == self.file_system_name:
+ new_dir_sas = self._query_str.strip('?')
+
+ new_directory_client = DataLakeDirectoryClient(
+ "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, directory_name=new_path,
+ credential=self._raw_credential or new_dir_sas,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+ _location_mode=self._location_mode, require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
+ await new_directory_client._rename_path( # pylint: disable=protected-access
+ '/{}/{}{}'.format(quote(unquote(self.file_system_name)),
+ quote(unquote(self.path_name)),
+ self._query_str),
+ **kwargs)
+ return new_directory_client
+
+ async def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs):
+ # type: (...) -> DataLakeDirectoryClient
+ """
+ Create a subdirectory and return the subdirectory client to be interacted with.
+
+ :param sub_directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :param metadata:
+ Name-value pairs associated with the file as metadata.
+ :type metadata: dict(str, str)
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword lease:
+ Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeDirectoryClient for the subdirectory.
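+
+ A brief usage sketch (placeholder names; ``directory_client`` is assumed to
+ be an existing, authenticated DataLakeDirectoryClient):
+
+ .. code-block:: python
+
+     async def make_logs_dir(directory_client):
+         sub_client = await directory_client.create_sub_directory("logs")
+         return sub_client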
+ """
+ subdir = self.get_sub_directory_client(sub_directory)
+ await subdir.create_directory(metadata=metadata, **kwargs)
+ return subdir
+
+ async def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str]
+ **kwargs):
+ # type: (...) -> DataLakeDirectoryClient
+ """
+ Marks the specified subdirectory for deletion.
+
+ :param sub_directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :keyword lease:
+ Required if the directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeDirectoryClient for the subdirectory
+ """
+ subdir = self.get_sub_directory_client(sub_directory)
+ await subdir.delete_directory(**kwargs)
+ return subdir
+
+ async def create_file(self, file, # type: Union[FileProperties, str]
+ **kwargs):
+ # type: (...) -> DataLakeFileClient
+ """
+ Create a new file and return the file client to be interacted with.
+
+ :param file:
+ The file with which to interact. This can either be the name of the file,
+ or an instance of FileProperties.
+ :type file: str or ~azure.storage.filedatalake.FileProperties
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword metadata:
+ Name-value pairs associated with the file as metadata.
+ :type metadata: dict(str, str)
+ :keyword lease:
+ Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeFileClient
+ """
+ file_client = self.get_file_client(file)
+ await file_client.create_file(**kwargs)
+ return file_client
+
+ def get_file_client(self, file # type: Union[FileProperties, str]
+ ):
+ # type: (...) -> DataLakeFileClient
+ """Get a client to interact with the specified file.
+
+ The file need not already exist.
+
+ :param file:
+ The file with which to interact. This can either be the name of the file,
+ or an instance of FileProperties, e.g. directory/subdirectory/file
+ :type file: str or ~azure.storage.filedatalake.FileProperties
+ :returns: A DataLakeFileClient.
+ :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/test_datalake_service_samples.py
+ :start-after: [START bsc_get_file_client]
+ :end-before: [END bsc_get_file_client]
+ :language: python
+ :dedent: 12
+ :caption: Getting the file client to interact with a specific file.
+ """
+ try:
+ file_path = file.get('name')
+ except AttributeError:
+ file_path = self.path_name + '/' + str(file)
+
+ _pipeline = AsyncPipeline(
+ transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return DataLakeFileClient(
+ self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
+ _location_mode=self._location_mode, require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
+
+ def get_sub_directory_client(self, sub_directory # type: Union[DirectoryProperties, str]
+ ):
+ # type: (...) -> DataLakeDirectoryClient
+ """Get a client to interact with the specified subdirectory of the current directory.
+
+ The subdirectory need not already exist.
+
+ :param sub_directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :returns: A DataLakeDirectoryClient.
+ :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/test_datalake_service_samples.py
+ :start-after: [START bsc_get_directory_client]
+ :end-before: [END bsc_get_directory_client]
+ :language: python
+ :dedent: 12
+ :caption: Getting the directory client to interact with a specific directory.
+ """
+ try:
+ subdir_path = sub_directory.get('name')
+ except AttributeError:
+ subdir_path = self.path_name + '/' + str(sub_directory)
+
+ _pipeline = AsyncPipeline(
+ transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return DataLakeDirectoryClient(
+ self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
+ _location_mode=self._location_mode, require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
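+
+
+# A hedged end-to-end sketch (not part of the vendored SDK): the account URL,
+# file system, directory and file names below are placeholders, and
+# `credential` is assumed to be a valid account key or SAS token.
+async def _example_directory_usage(credential):
+    directory_client = DataLakeDirectoryClient(
+        "https://myaccount.dfs.core.windows.net", "myfilesystem", "mydir",
+        credential=credential)
+    await directory_client.create_directory(metadata={"owner": "docs"})
+    file_client = await directory_client.create_file("hello.txt")
+    await file_client.upload_data(b"hello world", overwrite=True)
+    return await directory_client.get_directory_properties()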
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_data_lake_file_client_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_data_lake_file_client_async.py
new file mode 100644
index 00000000000..df25ecf51f9
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_data_lake_file_client_async.py
@@ -0,0 +1,574 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+from typing import (  # pylint: disable=unused-import
+ Any, AnyStr, Dict, IO, Iterable, Optional, Union)
+try:
+ from urllib.parse import quote, unquote
+except ImportError:
+ from urllib2 import quote, unquote # type: ignore
+
+from azure.core.exceptions import HttpResponseError
+from ._download_async import StorageStreamDownloader
+from ._path_client_async import PathClient
+from .._data_lake_file_client import DataLakeFileClient as DataLakeFileClientBase
+from .._serialize import convert_datetime_to_rfc1123
+from .._deserialize import process_storage_error, deserialize_file_properties
+from .._models import FileProperties
+from ._upload_helper import upload_datalake_file
+
+
+class DataLakeFileClient(PathClient, DataLakeFileClientBase):
+ """A client to interact with the DataLake file, even if the file may not yet exist.
+
+ :ivar str url:
+ The full endpoint URL to the file system, including SAS token if used.
+ :ivar str primary_endpoint:
+ The full primary endpoint URL.
+ :ivar str primary_hostname:
+ The hostname of the primary endpoint.
+ :param str account_url:
+ The URI to the storage account.
+ :param file_system_name:
+ The file system for the directory or files.
+ :type file_system_name: str
+ :param file_path:
+ The whole file path, used to interact with a specific file,
+ e.g. "{directory}/{subdirectory}/{file}"
+ :type file_path: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string,
+ an instance of an AzureSasCredential from azure.core.credentials, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+ - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py
+ :start-after: [START instantiate_file_client_from_conn_str]
+ :end-before: [END instantiate_file_client_from_conn_str]
+ :language: python
+ :dedent: 4
+ :caption: Creating the DataLakeFileClient from connection string.
+ """
+
+ def __init__(
+ self, account_url, # type: str
+ file_system_name, # type: str
+ file_path, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path,
+ credential=credential, **kwargs)
+
+ async def create_file(self, content_settings=None, # type: Optional[ContentSettings]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """
+ Create a new file.
+
+ :param ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :param metadata:
+ Name-value pairs associated with the file as metadata.
+ :type metadata: dict(str, str)
+ :keyword lease:
+ Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: response dict (Etag and last modified).
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+ :start-after: [START create_file]
+ :end-before: [END create_file]
+ :language: python
+ :dedent: 4
+ :caption: Create file.
+ """
+ return await self._create('file', content_settings=content_settings, metadata=metadata, **kwargs)
+
+ async def exists(self, **kwargs):
+ # type: (**Any) -> bool
+ """
+ Returns True if a file exists and returns False otherwise.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: boolean
+ """
+ return await self._exists(**kwargs)
+
+ async def delete_file(self, **kwargs):
+ # type: (...) -> None
+ """
+ Marks the specified file for deletion.
+
+ :keyword lease:
+ Required if the file has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+ :start-after: [START delete_file]
+ :end-before: [END delete_file]
+ :language: python
+ :dedent: 4
+ :caption: Delete file.
+ """
+ return await self._delete(**kwargs)
+
+ async def get_file_properties(self, **kwargs):
+ # type: (**Any) -> FileProperties
+ """Returns all user-defined metadata, standard HTTP properties, and
+ system properties for the file. It does not return the content of the file.
+
+ :keyword lease:
+ Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: FileProperties
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+ :start-after: [START get_file_properties]
+ :end-before: [END get_file_properties]
+ :language: python
+ :dedent: 4
+ :caption: Getting the properties for a file.
+ """
+ return await self._get_path_properties(cls=deserialize_file_properties, **kwargs) # pylint: disable=protected-access
+
+ async def set_file_expiry(self, expiry_options, # type: str
+ expires_on=None, # type: Optional[Union[datetime, int]]
+ **kwargs):
+ # type: (str, Optional[Union[datetime, int]], **Any) -> None
+ """Sets the time a file will expire and be deleted.
+
+ :param str expiry_options:
+ Required. Indicates mode of the expiry time.
+ Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute'
+ :param datetime or int expires_on:
+ The time to set the file to expire.
+ When expiry_options is RelativeToCreation or RelativeToNow, expires_on should be an int in milliseconds.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
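+
+ A brief sketch (``file_client`` is assumed to be an existing, authenticated
+ DataLakeFileClient); RelativeToNow takes the offset in milliseconds:
+
+ .. code-block:: python
+
+     async def expire_in_one_hour(file_client):
+         await file_client.set_file_expiry(
+             "RelativeToNow", expires_on=60 * 60 * 1000)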
+ """
+ try:
+ expires_on = convert_datetime_to_rfc1123(expires_on)
+ except AttributeError:
+ expires_on = str(expires_on)
+ await self._datalake_client_for_blob_operation.path.set_expiry(expiry_options, expires_on=expires_on,
+ **kwargs) # pylint: disable=protected-access
+
+ async def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
+ length=None, # type: Optional[int]
+ overwrite=False, # type: Optional[bool]
+ **kwargs):
+ # type: (...) -> Dict[str, Any]
+ """
+ Upload data to a file.
+
+ :param data: Content to be uploaded to file
+ :param int length: Size of the data in bytes.
+ :param bool overwrite: whether to overwrite an existing file.
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword metadata:
+ Name-value pairs associated with the blob as metadata.
+ :paramtype metadata: dict(str, str)
+ :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
+ Required if the blob has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions: Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the file. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https, as https (the default), will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient upload algorithm
+ will not be used because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword int chunk_size:
+ The maximum chunk size for uploading a file in chunks.
+ Defaults to 100*1024*1024, or 100MB.
+ :return: response dict (Etag and last modified).
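+
+ A minimal usage sketch (not from the original samples; the local path is a
+ placeholder and ``file_client`` is assumed to be an existing, authenticated
+ DataLakeFileClient):
+
+ .. code-block:: python
+
+     async def upload_local_file(file_client):
+         with open("data.bin", "rb") as stream:
+             await file_client.upload_data(stream, overwrite=True)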
+ """
+ options = self._upload_options(
+ data,
+ length=length,
+ overwrite=overwrite,
+ **kwargs)
+ return await upload_datalake_file(**options)
+
+ async def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
+ offset, # type: int
+ length=None, # type: Optional[int]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime, int]]
+ """Append data to the file.
+
+ :param data: Content to be appended to file
+ :param offset: start position at which the data is to be appended.
+ :param length: Size of the data in bytes.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the block content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https as https (the default)
+ will already validate. Note that this MD5 hash is not stored with the
+ file.
+ :keyword lease:
+ Required if the file has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :return: dict of the response header
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+ :start-after: [START append_data]
+ :end-before: [END append_data]
+ :language: python
+ :dedent: 4
+ :caption: Append data to the file.
+ """
+ options = self._append_data_options(
+ data,
+ offset,
+ length=length,
+ **kwargs)
+ try:
+ return await self._client.path.append_data(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ async def flush_data(self, offset, # type: int
+ retain_uncommitted_data=False, # type: Optional[bool]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """ Commit the previous appended data.
+
+ :param offset: offset is equal to the length of the file after committing the
+ previously appended data.
+ :param bool retain_uncommitted_data: Valid only for flush operations. If
+ "true", uncommitted data is retained after the flush operation
+ completes; otherwise, the uncommitted data is deleted after the flush
+ operation. The default is false. Data at offsets less than the
+ specified position are written to the file when flush succeeds, but
+ this optional parameter allows data after the flush position to be
+ retained for a future flush operation.
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword bool close: Azure Storage Events allow applications to receive
+ notifications when files change. When Azure Storage Events are
+ enabled, a file changed event is raised. This event has a property
+ indicating whether this is the final change to distinguish the
+ difference between an intermediate flush to a file stream and the
+ final close of a file stream. The close query parameter is valid only
+ when the action is "flush" and change notifications are enabled. If
+ the value of close is "true" and the flush operation completes
+ successfully, the service raises a file change notification with a
+ property indicating that this is the final update (the file stream has
+ been closed). If "false" a change notification is raised indicating
+ the file has changed. The default is false. This query parameter is
+ set to true by the Hadoop ABFS driver to indicate that the file stream
+ has been closed.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :return: response header in dict
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START upload_file_to_file_system]
+ :end-before: [END upload_file_to_file_system]
+ :language: python
+ :dedent: 12
+ :caption: Commit the previous appended data.
+ """
+ options = self._flush_data_options(
+ offset,
+ retain_uncommitted_data=retain_uncommitted_data, **kwargs)
+ try:
+ return await self._client.path.flush_data(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ async def download_file(self, offset=None, length=None, **kwargs):
+ # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
+ """Downloads a file to the StorageStreamDownloader. The readall() method must
+ be used to read all the content, or readinto() must be used to download the file into
+ a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks.
+
+ :param int offset:
+ Start of byte range to use for downloading a section of the file.
+ Must be set if length is provided.
+ :param int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :keyword lease:
+ If specified, download only succeeds if the file's lease is active
+ and matches this ID. Required if the file has an active lease.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :returns: A streaming object (StorageStreamDownloader)
+ :rtype: ~azure.storage.filedatalake.aio.StorageStreamDownloader
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+ :start-after: [START read_file]
+ :end-before: [END read_file]
+ :language: python
+ :dedent: 4
+ :caption: Return the downloaded data.
+ """
+ downloader = await self._blob_client.download_blob(offset=offset, length=length, **kwargs)
+ return StorageStreamDownloader(downloader)
+
+ async def rename_file(self, new_name, **kwargs):
+ # type: (str, **Any) -> DataLakeFileClient
+ """
+ Rename the source file.
+
+ :param str new_name: the new file name the user wants to rename to.
+ The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}".
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword source_lease: A lease ID for the source path. If specified,
+ the source path must have an active lease and the lease ID must
+ match.
+ :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: the renamed file client
+ :rtype: DataLakeFileClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+ :start-after: [START rename_file]
+ :end-before: [END rename_file]
+ :language: python
+ :dedent: 4
+ :caption: Rename the source file.
+ """
+ new_name = new_name.strip('/')
+ new_file_system = new_name.split('/')[0]
+ new_path_and_token = new_name[len(new_file_system):].strip('/').split('?')
+ new_path = new_path_and_token[0]
+ try:
+ new_file_sas = new_path_and_token[1] or self._query_str.strip('?')
+ except IndexError:
+ if not self._raw_credential and new_file_system != self.file_system_name:
+ raise ValueError("please provide the sas token for the new file")
+ if not self._raw_credential and new_file_system == self.file_system_name:
+ new_file_sas = self._query_str.strip('?')
+
+ new_file_client = DataLakeFileClient(
+ "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, file_path=new_path,
+ credential=self._raw_credential or new_file_sas,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
+ _location_mode=self._location_mode, require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
+ await new_file_client._rename_path( # pylint: disable=protected-access
+ '/{}/{}{}'.format(quote(unquote(self.file_system_name)),
+ quote(unquote(self.path_name)),
+ self._query_str),
+ **kwargs)
+ return new_file_client
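The async `DataLakeFileClient` above routes appends and flushes through the generated path operations and delegates downloads to the wrapped blob client. A minimal sketch of the resulting append/flush/download round trip, assuming the public `azure-storage-file-datalake` aio package (which this vendored module mirrors); the connection string, file system and path names are placeholders:
```
import asyncio
from azure.storage.filedatalake.aio import DataLakeFileClient

async def main():
    # Placeholders: supply a real connection string, file system and file path.
    async with DataLakeFileClient.from_connection_string(
            "<connection-string>", file_system_name="myfilesystem", file_path="dir/test.txt") as file_client:
        data = b"hello datalake"
        await file_client.create_file()
        await file_client.append_data(data, offset=0, length=len(data))
        # flush_data commits everything appended so far; the offset is the total length written
        await file_client.flush_data(len(data))
        downloader = await file_client.download_file()
        print(await downloader.readall())

asyncio.run(main())
```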
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_data_lake_lease_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_data_lake_lease_async.py
new file mode 100644
index 00000000000..cb8b79889a0
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_data_lake_lease_async.py
@@ -0,0 +1,243 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any,
+ TypeVar, TYPE_CHECKING
+)
+from ....blob.aio import BlobLeaseClient
+from .._data_lake_lease import DataLakeLeaseClient as DataLakeLeaseClientBase
+
+
+if TYPE_CHECKING:
+ FileSystemClient = TypeVar("FileSystemClient")
+ DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient")
+ DataLakeFileClient = TypeVar("DataLakeFileClient")
+
+
+class DataLakeLeaseClient(DataLakeLeaseClientBase):
+ """Creates a new DataLakeLeaseClient.
+
+ This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+ :ivar str id:
+ The ID of the lease currently being maintained. This will be `None` if no
+ lease has yet been acquired.
+ :ivar str etag:
+ The ETag of the lease currently being maintained. This will be `None` if no
+ lease has yet been acquired or modified.
+ :ivar ~datetime.datetime last_modified:
+ The last modified timestamp of the lease currently being maintained.
+ This will be `None` if no lease has yet been acquired or modified.
+
+ :param client:
+ The client of the file system, directory, or file to lease.
+ :type client: ~azure.storage.filedatalake.aio.FileSystemClient or
+ ~azure.storage.filedatalake.aio.DataLakeDirectoryClient or ~azure.storage.filedatalake.aio.DataLakeFileClient
+ :param str lease_id:
+ A string representing the lease ID of an existing lease. This value does not
+ need to be specified in order to acquire a new lease, or break one.
+ """
+ def __init__(
+ self, client, lease_id=None
+ ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
+ # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None
+ super(DataLakeLeaseClient, self).__init__(client, lease_id)
+
+ if hasattr(client, '_blob_client'):
+ _client = client._blob_client # type: ignore # pylint: disable=protected-access
+ elif hasattr(client, '_container_client'):
+ _client = client._container_client # type: ignore # pylint: disable=protected-access
+ else:
+ raise TypeError("Lease must use any of FileSystemClient, DataLakeDirectoryClient, or DataLakeFileClient.")
+
+ self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id)
+
+ def __enter__(self):
+ raise TypeError("Async lease must use 'async with'.")
+
+ def __exit__(self, *args):
+ self.release()
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, *args):
+ await self.release()
+
+ async def acquire(self, lease_duration=-1, **kwargs):
+ # type: (int, **Any) -> None
+ """Requests a new lease.
+
+ If the file/file system does not have an active lease, the DataLake service creates a
+ lease on the file/file system and returns a new lease ID.
+
+ :param int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires. A non-infinite lease can be
+ between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change. Default is -1 (infinite lease).
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ await self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs)
+ self._update_lease_client_attributes()
+
+ async def renew(self, **kwargs):
+ # type: (Any) -> None
+ """Renews the lease.
+
+ The lease can be renewed if the lease ID specified in the
+ lease client matches that associated with the file system or file. Note that
+ the lease may be renewed even if it has expired as long as the file system
+ or file has not been leased again since the expiration of that lease. When you
+ renew a lease, the lease duration clock resets.
+
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ await self._blob_lease_client.renew(**kwargs)
+ self._update_lease_client_attributes()
+
+ async def release(self, **kwargs):
+ # type: (Any) -> None
+ """Release the lease.
+
+ The lease may be released if the client lease id specified matches
+ that associated with the file system or file. Releasing the lease allows another client
+ to immediately acquire the lease for the file system or file as soon as the release is complete.
+
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ await self._blob_lease_client.release(**kwargs)
+ self._update_lease_client_attributes()
+
+ async def change(self, proposed_lease_id, **kwargs):
+ # type: (str, Any) -> None
+ """Change the lease ID of an active lease.
+
+ :param str proposed_lease_id:
+ Proposed lease ID, in a GUID string format. The DataLake service returns 400
+ (Invalid request) if the proposed lease ID is not in the correct format.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ await self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs)
+ self._update_lease_client_attributes()
+
+ async def break_lease(self, lease_break_period=None, **kwargs):
+ # type: (Optional[int], Any) -> int
+ """Break the lease, if the file system or file has an active lease.
+
+ Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
+ the request is not required to specify a matching lease ID. When a lease
+ is broken, the lease break period is allowed to elapse, during which time
+ no lease operation except break and release can be performed on the file system or file.
+ When a lease is successfully broken, the response indicates the interval
+ in seconds until a new lease can be acquired.
+
+ :param int lease_break_period:
+ This is the proposed duration of seconds that the lease
+ should continue before it is broken, between 0 and 60 seconds. This
+ break period is only used if it is shorter than the time remaining
+ on the lease. If longer, the time remaining on the lease is used.
+ A new lease will not be available before the break period has
+ expired, but the lease may be held for longer than the break
+ period. If this header does not appear with a break
+ operation, a fixed-duration lease breaks after the remaining lease
+ period elapses, and an infinite lease breaks immediately.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Approximate time remaining in the lease period, in seconds.
+ :rtype: int
+ """
+ return await self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs)
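Each lease method above forwards to a `BlobLeaseClient` on the wrapped blob resource and then refreshes the `id`/`etag`/`last_modified` attributes. A minimal sketch of holding a lease while mutating a file system, assuming the public `azure-storage-file-datalake` aio package; the connection string and names are placeholders:
```
import asyncio
from azure.storage.filedatalake.aio import FileSystemClient

async def main():
    # Placeholders: supply a real connection string and file system name.
    async with FileSystemClient.from_connection_string(
            "<connection-string>", file_system_name="myfilesystem") as fs_client:
        # acquire_lease() builds a DataLakeLeaseClient and awaits acquire() on it
        lease = await fs_client.acquire_lease(lease_duration=15)
        try:
            # lease-gated operations accept the lease via the `lease` keyword
            await fs_client.set_file_system_metadata({"category": "test"}, lease=lease)
        finally:
            await lease.release()

asyncio.run(main())
```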
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_data_lake_service_client_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_data_lake_service_client_async.py
new file mode 100644
index 00000000000..db417b1d4eb
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_data_lake_service_client_async.py
@@ -0,0 +1,505 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+from typing import Optional, Any, Dict
+
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import AsyncPipeline
+
+from ....blob.aio import BlobServiceClient
+from .._generated.aio import AzureDataLakeStorageRESTAPI
+from .._deserialize import get_datalake_service_properties
+from .._shared.base_client_async import AsyncTransportWrapper, AsyncStorageAccountHostsMixin
+from ._file_system_client_async import FileSystemClient
+from .._data_lake_service_client import DataLakeServiceClient as DataLakeServiceClientBase
+from .._shared.policies_async import ExponentialRetry
+from ._data_lake_directory_client_async import DataLakeDirectoryClient
+from ._data_lake_file_client_async import DataLakeFileClient
+from ._models import FileSystemPropertiesPaged
+from .._models import UserDelegationKey, LocationMode
+
+
+class DataLakeServiceClient(AsyncStorageAccountHostsMixin, DataLakeServiceClientBase):
+ """A client to interact with the DataLake Service at the account level.
+
+ This client provides operations to retrieve and configure the account properties
+ as well as list, create and delete file systems within the account.
+ For operations relating to a specific file system, directory or file, clients for those entities
+ can also be retrieved using the `get_client` functions.
+
+ :ivar str url:
+ The full endpoint URL to the datalake service endpoint.
+ :ivar str primary_endpoint:
+ The full primary endpoint URL.
+ :ivar str primary_hostname:
+ The hostname of the primary endpoint.
+ :param str account_url:
+ The URL to the DataLake storage account. Any other entities included
+ in the URL path (e.g. file system or file) will be discarded. This URL can be optionally
+ authenticated with a SAS token.
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string,
+ an instance of an AzureSasCredential from azure.core.credentials, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+ - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service_async.py
+ :start-after: [START create_datalake_service_client]
+ :end-before: [END create_datalake_service_client]
+ :language: python
+ :dedent: 4
+ :caption: Creating the DataLakeServiceClient from connection string.
+
+ .. literalinclude:: ../samples/datalake_samples_service_async.py
+ :start-after: [START create_datalake_service_client_oauth]
+ :end-before: [END create_datalake_service_client_oauth]
+ :language: python
+ :dedent: 4
+ :caption: Creating the DataLakeServiceClient with Azure Identity credentials.
+ """
+
+ def __init__(
+ self, account_url, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+ super(DataLakeServiceClient, self).__init__(
+ account_url,
+ credential=credential,
+ **kwargs
+ )
+ self._blob_service_client = BlobServiceClient(self._blob_account_url, credential, **kwargs)
+ self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access
+ self._client = AzureDataLakeStorageRESTAPI(self.url, pipeline=self._pipeline)
+ self._loop = kwargs.get('loop', None)
+
+ async def __aenter__(self):
+ await self._blob_service_client.__aenter__()
+ return self
+
+ async def __aexit__(self, *args):
+ await self._blob_service_client.close()
+
+ async def close(self):
+ # type: () -> None
+ """ This method is to close the sockets opened by the client.
+ It need not be used when using with a context manager.
+ """
+ await self._blob_service_client.close()
+
+ async def get_user_delegation_key(self, key_start_time, # type: datetime
+ key_expiry_time, # type: datetime
+ **kwargs # type: Any
+ ):
+ # type: (...) -> UserDelegationKey
+ """
+ Obtain a user delegation key for the purpose of signing SAS tokens.
+ A token credential must be present on the service object for this request to succeed.
+
+ :param ~datetime.datetime key_start_time:
+ A DateTime value. Indicates when the key becomes valid.
+ :param ~datetime.datetime key_expiry_time:
+ A DateTime value. Indicates when the key stops being valid.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: The user delegation key.
+ :rtype: ~azure.storage.filedatalake.UserDelegationKey
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service_async.py
+ :start-after: [START get_user_delegation_key]
+ :end-before: [END get_user_delegation_key]
+ :language: python
+ :dedent: 8
+ :caption: Get user delegation key from datalake service client.
+ """
+ delegation_key = await self._blob_service_client.get_user_delegation_key(
+ key_start_time=key_start_time,
+ key_expiry_time=key_expiry_time,
+ **kwargs) # pylint: disable=protected-access
+ return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access
+
+ def list_file_systems(self, name_starts_with=None, # type: Optional[str]
+ include_metadata=None, # type: Optional[bool]
+ **kwargs):
+ # type: (...) -> ItemPaged[FileSystemProperties]
+ """Returns a generator to list the file systems under the specified account.
+
+ The generator will lazily follow the continuation tokens returned by
+ the service and stop when all file systems have been returned.
+
+ :param str name_starts_with:
+ Filters the results to return only file systems whose names
+ begin with the specified prefix.
+ :param bool include_metadata:
+ Specifies that file system metadata be returned in the response.
+ The default value is `False`.
+ :keyword int results_per_page:
+ The maximum number of file system names to retrieve per API
+ call. If the request does not specify, the server will return up to 5,000 items per page.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword bool include_deleted:
+ Specifies that deleted file systems be returned in the response. This is only for accounts
+ with file system restore enabled. The default value is `False`.
+ .. versionadded:: 12.3.0
+ :returns: An iterable (auto-paging) of FileSystemProperties.
+ :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service_async.py
+ :start-after: [START list_file_systems]
+ :end-before: [END list_file_systems]
+ :language: python
+ :dedent: 8
+ :caption: Listing the file systems in the datalake service.
+ """
+ item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with,
+ include_metadata=include_metadata,
+ **kwargs) # pylint: disable=protected-access
+ item_paged._page_iterator_class = FileSystemPropertiesPaged # pylint: disable=protected-access
+ return item_paged
+
+ async def create_file_system(self, file_system, # type: Union[FileSystemProperties, str]
+ metadata=None, # type: Optional[Dict[str, str]]
+ public_access=None, # type: Optional[PublicAccess]
+ **kwargs):
+ # type: (...) -> FileSystemClient
+ """Creates a new file system under the specified account.
+
+ If the file system with the same name already exists, a ResourceExistsError will
+ be raised. This method returns a client with which to interact with the newly
+ created file system.
+
+ :param str file_system:
+ The name of the file system to create.
+ :param metadata:
+ A dict with name-value pairs to associate with the
+ file system as metadata. Example: `{'Category':'test'}`
+ :type metadata: dict(str, str)
+ :param public_access:
+ Possible values include: file system, file.
+ :type public_access: ~azure.storage.filedatalake.PublicAccess
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service_async.py
+ :start-after: [START create_file_system_from_service_client]
+ :end-before: [END create_file_system_from_service_client]
+ :language: python
+ :dedent: 8
+ :caption: Creating a file system in the datalake service.
+ """
+ file_system_client = self.get_file_system_client(file_system)
+ await file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs)
+ return file_system_client
+
+ async def _rename_file_system(self, name, new_name, **kwargs):
+ # type: (str, str, **Any) -> FileSystemClient
+ """Renames a filesystem.
+
+ Operation is successful only if the source filesystem exists.
+
+ :param str name:
+ The name of the filesystem to rename.
+ :param str new_name:
+ The new filesystem name the user wants to rename to.
+ :keyword lease:
+ Specify this to perform only if the lease ID given
+ matches the active lease ID of the source filesystem.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: ~azure.storage.filedatalake.FileSystemClient
+ """
+ await self._blob_service_client._rename_container(name, new_name, **kwargs) # pylint: disable=protected-access
+ renamed_file_system = self.get_file_system_client(new_name)
+ return renamed_file_system
+
+ async def undelete_file_system(self, name, deleted_version, **kwargs):
+ # type: (str, str, **Any) -> FileSystemClient
+ """Restores soft-deleted filesystem.
+
+ Operation will only be successful if used within the specified number of days
+ set in the delete retention policy.
+
+ .. versionadded:: 12.3.0
+ This operation was introduced in API version '2019-12-12'.
+
+ :param str name:
+ Specifies the name of the deleted filesystem to restore.
+ :param str deleted_version:
+ Specifies the version of the deleted filesystem to restore.
+ :keyword str new_name:
+ The new name for the deleted filesystem to be restored to.
+ If not specified "name" will be used as the restored filesystem name.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: ~azure.storage.filedatalake.FileSystemClient
+ """
+ new_name = kwargs.pop('new_name', None)
+ await self._blob_service_client.undelete_container(name, deleted_version, new_name=new_name, **kwargs) # pylint: disable=protected-access
+ file_system = self.get_file_system_client(new_name or name)
+ return file_system
+
+ async def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str]
+ **kwargs):
+ # type: (...) -> FileSystemClient
+ """Marks the specified file system for deletion.
+
+ The file system and any files contained within it are later deleted during garbage collection.
+ If the file system is not found, a ResourceNotFoundError will be raised.
+
+ :param file_system:
+ The file system to delete. This can either be the name of the file system,
+ or an instance of FileSystemProperties.
+ :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+ :keyword lease:
+ If specified, delete_file_system only succeeds if the
+ file system's lease is active and matches this ID.
+ Required if the file system has an active lease.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service_async.py
+ :start-after: [START delete_file_system_from_service_client]
+ :end-before: [END delete_file_system_from_service_client]
+ :language: python
+ :dedent: 8
+ :caption: Deleting a file system in the datalake service.
+ """
+ file_system_client = self.get_file_system_client(file_system)
+ await file_system_client.delete_file_system(**kwargs)
+ return file_system_client
+
+ def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str]
+ ):
+ # type: (...) -> FileSystemClient
+ """Get a client to interact with the specified file system.
+
+ The file system need not already exist.
+
+ :param file_system:
+ The file system. This can either be the name of the file system,
+ or an instance of FileSystemProperties.
+ :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+ :returns: A FileSystemClient.
+ :rtype: ~azure.storage.filedatalake.aio.FileSystemClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START create_file_system_client_from_service]
+ :end-before: [END create_file_system_client_from_service]
+ :language: python
+ :dedent: 8
+ :caption: Getting the file system client to interact with a specific file system.
+ """
+ try:
+ file_system_name = file_system.name
+ except AttributeError:
+ file_system_name = file_system
+
+ _pipeline = AsyncPipeline(
+ transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return FileSystemClient(self.url, file_system_name, credential=self._raw_credential,
+ _configuration=self._config,
+ _pipeline=_pipeline, _hosts=self._hosts,
+ require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
+
+ def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str]
+ directory # type: Union[DirectoryProperties, str]
+ ):
+ # type: (...) -> DataLakeDirectoryClient
+ """Get a client to interact with the specified directory.
+
+ The directory need not already exist.
+
+ :param file_system:
+ The file system that the directory is in. This can either be the name of the file system,
+ or an instance of FileSystemProperties.
+ :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+ :param directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :returns: A DataLakeDirectoryClient.
+ :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service_async.py
+ :start-after: [START get_directory_client_from_service_client]
+ :end-before: [END get_directory_client_from_service_client]
+ :language: python
+ :dedent: 8
+ :caption: Getting the directory client to interact with a specific directory.
+ """
+ try:
+ file_system_name = file_system.name
+ except AttributeError:
+ file_system_name = file_system
+ try:
+ directory_name = directory.name
+ except AttributeError:
+ directory_name = directory
+
+ _pipeline = AsyncPipeline(
+ transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name,
+ credential=self._raw_credential,
+ _configuration=self._config, _pipeline=_pipeline,
+ _hosts=self._hosts,
+ require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function
+ )
+
+ def get_file_client(self, file_system, # type: Union[FileSystemProperties, str]
+ file_path # type: Union[FileProperties, str]
+ ):
+ # type: (...) -> DataLakeFileClient
+ """Get a client to interact with the specified file.
+
+ The file need not already exist.
+
+ :param file_system:
+ The file system that the file is in. This can either be the name of the file system,
+ or an instance of FileSystemProperties.
+ :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+ :param file_path:
+ The file with which to interact. This can either be the full path of the file (from the root directory),
+ or an instance of FileProperties, e.g. directory/subdirectory/file.
+ :type file_path: str or ~azure.storage.filedatalake.FileProperties
+ :returns: A DataLakeFileClient.
+ :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service_async.py
+ :start-after: [START get_file_client_from_service_client]
+ :end-before: [END get_file_client_from_service_client]
+ :language: python
+ :dedent: 8
+ :caption: Getting the file client to interact with a specific file.
+ """
+ try:
+ file_system_name = file_system.name
+ except AttributeError:
+ file_system_name = file_system
+ try:
+ file_path = file_path.name
+ except AttributeError:
+ pass
+
+ _pipeline = AsyncPipeline(
+ transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return DataLakeFileClient(
+ self.url, file_system_name, file_path=file_path, credential=self._raw_credential,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
+ require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
+
+ async def set_service_properties(self, **kwargs):
+ # type: (**Any) -> None
+ """Sets the properties of a storage account's Datalake service, including
+ Azure Storage Analytics.
+
+ If an element (e.g. analytics_logging) is left as None, the
+ existing settings on the service for that functionality are preserved.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2020-06-12'.
+
+ :keyword analytics_logging:
+ Groups the Azure Analytics Logging settings.
+ :type analytics_logging: ~azure.storage.filedatalake.AnalyticsLogging
+ :keyword hour_metrics:
+ The hour metrics settings provide a summary of request
+ statistics grouped by API in hourly aggregates.
+ :type hour_metrics: ~azure.storage.filedatalake.Metrics
+ :keyword minute_metrics:
+ The minute metrics settings provide request statistics
+ for each minute.
+ :type minute_metrics: ~azure.storage.filedatalake.Metrics
+ :keyword cors:
+ You can include up to five CorsRule elements in the
+ list. If an empty list is specified, all CORS rules will be deleted,
+ and CORS will be disabled for the service.
+ :type cors: list[~azure.storage.filedatalake.CorsRule]
+ :keyword str target_version:
+ Indicates the default version to use for requests if an incoming
+ request's version is not specified.
+ :keyword delete_retention_policy:
+ The delete retention policy specifies whether to retain deleted files/directories.
+ It also specifies the number of days and versions of file/directory to keep.
+ :type delete_retention_policy: ~azure.storage.filedatalake.RetentionPolicy
+ :keyword static_website:
+ Specifies whether the static website feature is enabled,
+ and if yes, indicates the index document and 404 error document to use.
+ :type static_website: ~azure.storage.filedatalake.StaticWebsite
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ return await self._blob_service_client.set_service_properties(**kwargs) # pylint: disable=protected-access
+
+ async def get_service_properties(self, **kwargs):
+ # type: (**Any) -> Dict[str, Any]
+ """Gets the properties of a storage account's datalake service, including
+ Azure Storage Analytics.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2020-06-12'.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An object containing datalake service properties such as
+ analytics logging, hour/minute metrics, cors rules, etc.
+ :rtype: Dict[str, Any]
+ """
+ props = await self._blob_service_client.get_service_properties(**kwargs) # pylint: disable=protected-access
+ return get_datalake_service_properties(props)
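The service-properties methods above expose the delete retention policy that ADLS Gen2 soft delete relies on. A minimal sketch of toggling that policy through the service client, assuming the public `azure-storage-file-datalake` package (which this vendored client mirrors); the connection string and retention period are placeholders:
```
import asyncio
from azure.storage.filedatalake import RetentionPolicy
from azure.storage.filedatalake.aio import DataLakeServiceClient

async def main():
    async with DataLakeServiceClient.from_connection_string("<connection-string>") as service_client:
        # keep deleted files/directories for 5 days (placeholder value)
        await service_client.set_service_properties(
            delete_retention_policy=RetentionPolicy(enabled=True, days=5))
        props = await service_client.get_service_properties()
        print(props["delete_retention_policy"].enabled)

asyncio.run(main())
```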
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_download_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_download_async.py
new file mode 100644
index 00000000000..5685478d3e2
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_download_async.py
@@ -0,0 +1,59 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from typing import AsyncIterator
+
+from .._deserialize import from_blob_properties
+
+
+class StorageStreamDownloader(object):
+ """A streaming object to download from Azure Storage.
+
+ :ivar str name:
+ The name of the file being downloaded.
+ :ivar ~azure.storage.filedatalake.FileProperties properties:
+ The properties of the file being downloaded. If only a range of the data is being
+ downloaded, this will be reflected in the properties.
+ :ivar int size:
+ The size of the total data in the stream. This will be the byte range if specified,
+ otherwise the total size of the file.
+ """
+
+ def __init__(self, downloader):
+ self._downloader = downloader
+ self.name = self._downloader.name
+ self.properties = from_blob_properties(self._downloader.properties) # pylint: disable=protected-access
+ self.size = self._downloader.size
+
+ def __len__(self):
+ return self.size
+
+ def chunks(self):
+ # type: () -> AsyncIterator[bytes]
+ """Iterate over chunks in the download stream.
+
+ :rtype: AsyncIterator[bytes]
+ """
+ return self._downloader.chunks()
+
+ async def readall(self):
+ """Download the contents of this file.
+
+ This operation is blocking until all data is downloaded.
+ :rtype: bytes or str
+ """
+ return await self._downloader.readall()
+
+ async def readinto(self, stream):
+ """Download the contents of this file to a stream.
+
+ :param stream:
+ The stream to download to. This can be an open file-handle,
+ or any writable stream. The stream must be seekable if the download
+ uses more than one parallel connection.
+ :returns: The number of bytes read.
+ :rtype: int
+ """
+ return await self._downloader.readinto(stream)
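The async `StorageStreamDownloader` above is a thin wrapper over the blob downloader, so it is consumed in the same three ways. A minimal sketch, assuming the public `azure-storage-file-datalake` aio package and placeholder names; each downloader can only be consumed once, hence the repeated `download_file` calls:
```
import asyncio
from azure.storage.filedatalake.aio import DataLakeFileClient

async def main():
    async with DataLakeFileClient.from_connection_string(
            "<connection-string>", file_system_name="myfilesystem", file_path="dir/test.txt") as file_client:
        # 1. read the whole file into memory
        downloader = await file_client.download_file()
        data = await downloader.readall()
        # 2. or write it into any writable stream
        downloader = await file_client.download_file()
        with open("test_copy.txt", "wb") as stream:
            await downloader.readinto(stream)
        # 3. or iterate over it chunk by chunk
        downloader = await file_client.download_file()
        async for chunk in downloader.chunks():
            print(len(chunk))

asyncio.run(main())
```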
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_file_system_client_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_file_system_client_async.py
new file mode 100644
index 00000000000..dc36203e9a3
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_file_system_client_async.py
@@ -0,0 +1,867 @@
+# pylint: disable=too-many-lines
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+import functools
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, Dict, TYPE_CHECKING
+)
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator import distributed_trace
+
+from azure.core.pipeline import AsyncPipeline
+from azure.core.async_paging import AsyncItemPaged
+
+from azure.core.tracing.decorator_async import distributed_trace_async
+from ....blob.aio import ContainerClient
+from .._deserialize import process_storage_error, is_file_path
+from .._generated.models import ListBlobsIncludeItem
+
+from ._data_lake_file_client_async import DataLakeFileClient
+from ._data_lake_directory_client_async import DataLakeDirectoryClient
+from ._data_lake_lease_async import DataLakeLeaseClient
+from .._deserialize import deserialize_path_properties
+from .._file_system_client import FileSystemClient as FileSystemClientBase
+from .._generated.aio import AzureDataLakeStorageRESTAPI
+from .._shared.base_client_async import AsyncTransportWrapper, AsyncStorageAccountHostsMixin
+from .._shared.policies_async import ExponentialRetry
+from .._models import FileSystemProperties, PublicAccess, DirectoryProperties, FileProperties, DeletedPathProperties
+from ._list_paths_helper import DeletedPathPropertiesPaged
+
+
+if TYPE_CHECKING:
+ from datetime import datetime
+ from .._models import ( # pylint: disable=unused-import
+ ContentSettings)
+
+
+class FileSystemClient(AsyncStorageAccountHostsMixin, FileSystemClientBase):
+ """A client to interact with a specific file system, even if that file system
+ may not yet exist.
+
+ For operations relating to a specific directory or file within this file system, a directory client or file client
+ can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions.
+
+ :ivar str url:
+ The full endpoint URL to the file system, including SAS token if used.
+ :ivar str primary_endpoint:
+ The full primary endpoint URL.
+ :ivar str primary_hostname:
+ The hostname of the primary endpoint.
+ :param str account_url:
+ The URI to the storage account.
+ :param file_system_name:
+ The file system for the directory or files.
+ :type file_system_name: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string,
+ an instance of an AzureSasCredential from azure.core.credentials, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+ - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START create_file_system_client_from_service]
+ :end-before: [END create_file_system_client_from_service]
+ :language: python
+ :dedent: 8
+ :caption: Get a FileSystemClient from an existing DataLakeServiceClient.
+ """
+
+ def __init__(
+ self, account_url, # type: str
+ file_system_name, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+ super(FileSystemClient, self).__init__(
+ account_url,
+ file_system_name=file_system_name,
+ credential=credential,
+ **kwargs)
+ # override the sync _container_client set by the base class with an async ContainerClient
+ kwargs.pop('_hosts', None)
+ self._container_client = ContainerClient(self._blob_account_url, file_system_name,
+ credential=credential,
+ _hosts=self._container_client._hosts,# pylint: disable=protected-access
+ **kwargs) # type: ignore # pylint: disable=protected-access
+ self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, pipeline=self._pipeline)
+ self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._container_client.url,
+ file_system=file_system_name,
+ pipeline=self._pipeline)
+ self._loop = kwargs.get('loop', None)
+
+ async def __aexit__(self, *args):
+ await self._container_client.close()
+ await super(FileSystemClient, self).__aexit__(*args)
+
+ async def close(self):
+ # type: () -> None
+ """ This method is to close the sockets opened by the client.
+ It need not be used when using with a context manager.
+ """
+ await self._container_client.close()
+ await self.__aexit__()
+
+ @distributed_trace_async
+ async def acquire_lease(
+ self, lease_duration=-1, # type: int
+ lease_id=None, # type: Optional[str]
+ **kwargs
+ ):
+ # type: (...) -> DataLakeLeaseClient
+ """
+ Requests a new lease. If the file system does not have an active lease,
+ the DataLake service creates a lease on the file system and returns a new
+ lease ID.
+
+ :param int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires. A non-infinite lease can be
+ between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change. Default is -1 (infinite lease).
+ :param str lease_id:
+ Proposed lease ID, in a GUID string format. The DataLake service returns
+ 400 (Invalid request) if the proposed lease ID is not in the correct format.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A DataLakeLeaseClient object, that can be run in a context manager.
+ :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START acquire_lease_on_file_system]
+ :end-before: [END acquire_lease_on_file_system]
+ :language: python
+ :dedent: 12
+ :caption: Acquiring a lease on the file_system.
+ """
+ lease = DataLakeLeaseClient(self, lease_id=lease_id)
+ await lease.acquire(lease_duration=lease_duration, **kwargs)
+ return lease
+
+ @distributed_trace_async
+ async def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]]
+ public_access=None, # type: Optional[PublicAccess]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Creates a new file system under the specified account.
+
+ If a file system with the same name already exists, a ResourceExistsError will
+ be raised. This method returns the response headers (ETag and last modified)
+ of the newly created file system.
+
+ :param metadata:
+ A dict with name-value pairs to associate with the
+ file system as metadata. Example: `{'Category':'test'}`
+ :type metadata: dict(str, str)
+ :param public_access:
+ To specify whether data in the file system may be accessed publicly and the level of access.
+ :type public_access: ~azure.storage.filedatalake.PublicAccess
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: dict[str, str or ~datetime.datetime]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START create_file_system]
+ :end-before: [END create_file_system]
+ :language: python
+ :dedent: 16
+ :caption: Creating a file system in the datalake service.
+ """
+ return await self._container_client.create_container(metadata=metadata,
+ public_access=public_access,
+ **kwargs)
+
+ @distributed_trace_async
+ async def exists(self, **kwargs):
+ # type: (**Any) -> bool
+ """
+ Returns True if a file system exists and returns False otherwise.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: True if a file system exists, False otherwise.
+ :rtype: bool
+ """
+ return await self._container_client.exists(**kwargs)
+
+ @distributed_trace_async
+ async def _rename_file_system(self, new_name, **kwargs):
+ # type: (str, **Any) -> FileSystemClient
+ """Renames a filesystem.
+
+ Operation is successful only if the source filesystem exists.
+
+ :param str new_name:
+ The new filesystem name the user wants to rename to.
+ :keyword lease:
+ Specify this to perform only if the lease ID given
+ matches the active lease ID of the source filesystem.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: ~azure.storage.filedatalake.FileSystemClient
+ """
+ await self._container_client._rename_container(new_name, **kwargs) # pylint: disable=protected-access
+ renamed_file_system = FileSystemClient(
+ "{}://{}".format(self.scheme, self.primary_hostname), file_system_name=new_name,
+ credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
+ _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+ require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
+ return renamed_file_system
+
+ @distributed_trace_async
+ async def delete_file_system(self, **kwargs):
+ # type: (Any) -> None
+ """Marks the specified file system for deletion.
+
+ The file system and any files contained within it are later deleted during garbage collection.
+ If the file system is not found, a ResourceNotFoundError will be raised.
+
+ :keyword lease:
+ If specified, delete_file_system only succeeds if the
+ file system's lease is active and matches this ID.
+ Required if the file system has an active lease.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START delete_file_system]
+ :end-before: [END delete_file_system]
+ :language: python
+ :dedent: 16
+ :caption: Deleting a file system in the datalake service.
+ """
+ await self._container_client.delete_container(**kwargs)
+
+ @distributed_trace_async
+ async def get_file_system_properties(self, **kwargs):
+ # type: (Any) -> FileSystemProperties
+ """Returns all user-defined metadata and system properties for the specified
+ file system. The data returned does not include the file system's list of paths.
+
+ :keyword lease:
+ If specified, get_file_system_properties only succeeds if the
+ file system's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Properties for the specified file system within a file system object.
+ :rtype: ~azure.storage.filedatalake.FileSystemProperties
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START get_file_system_properties]
+ :end-before: [END get_file_system_properties]
+ :language: python
+ :dedent: 16
+ :caption: Getting properties on the file system.
+ """
+ container_properties = await self._container_client.get_container_properties(**kwargs)
+ return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access
+
+ @distributed_trace_async
+ async def set_file_system_metadata( # type: ignore
+ self, metadata, # type: Dict[str, str]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets one or more user-defined name-value pairs for the specified
+ file system. Each call to this operation replaces all existing metadata
+ attached to the file system. To remove all metadata from the file system,
+ call this operation with no metadata dict.
+
+ :param metadata:
+ A dict containing name-value pairs to associate with the file system as
+ metadata. Example: {'category':'test'}
+ :type metadata: dict[str, str]
+ :keyword lease:
+ If specified, set_file_system_metadata only succeeds if the
+ file system's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: file system-updated property dict (Etag and last modified).
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START set_file_system_metadata]
+ :end-before: [END set_file_system_metadata]
+ :language: python
+ :dedent: 16
+                :caption: Setting metadata on the file system.
+ """
+ return await self._container_client.set_container_metadata(metadata=metadata, **kwargs)
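+
+    # Usage sketch, assuming an authenticated async FileSystemClient named
+    # `fs_client` (the name is illustrative). Each call replaces all metadata
+    # previously stored on the file system:
+    #
+    #     await fs_client.set_file_system_metadata({'category': 'test'})
+    #     props = await fs_client.get_file_system_properties()
+    #     # props.metadata is now exactly {'category': 'test'}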
+
+ @distributed_trace_async
+ async def set_file_system_access_policy(
+ self, signed_identifiers, # type: Dict[str, AccessPolicy]
+ public_access=None, # type: Optional[Union[str, PublicAccess]]
+ **kwargs
+ ): # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets the permissions for the specified file system or stored access
+ policies that may be used with Shared Access Signatures. The permissions
+ indicate whether files in a file system may be accessed publicly.
+
+ :param signed_identifiers:
+ A dictionary of access policies to associate with the file system. The
+ dictionary may contain up to 5 elements. An empty dictionary
+ will clear the access policies set on the service.
+ :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy]
+ :param ~azure.storage.filedatalake.PublicAccess public_access:
+            Specifies whether data in the file system may be accessed publicly and the level of access.
+ :keyword lease:
+ Required if the file system has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A datetime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified date/time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A datetime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: filesystem-updated property dict (Etag and last modified).
+ :rtype: dict[str, str or ~datetime.datetime]
+ """
+ return await self._container_client.set_container_access_policy(signed_identifiers,
+ public_access=public_access, **kwargs)
+
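+    # Usage sketch, assuming `fs_client` is an authenticated async FileSystemClient
+    # (illustrative name). A stored access policy can be built from AccessPolicy and
+    # FileSystemSasPermissions in azure.storage.filedatalake:
+    #
+    #     from datetime import datetime, timedelta
+    #     from azure.storage.filedatalake import AccessPolicy, FileSystemSasPermissions
+    #
+    #     policy = AccessPolicy(permission=FileSystemSasPermissions(read=True, list=True),
+    #                           expiry=datetime.utcnow() + timedelta(hours=1))
+    #     await fs_client.set_file_system_access_policy(signed_identifiers={'read-only': policy})
+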
+ @distributed_trace_async
+ async def get_file_system_access_policy(self, **kwargs):
+ # type: (Any) -> Dict[str, Any]
+ """Gets the permissions for the specified file system.
+ The permissions indicate whether file system data may be accessed publicly.
+
+ :keyword lease:
+ If specified, get_file_system_access_policy only succeeds if the
+ file system's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Access policy information in a dict.
+ :rtype: dict[str, Any]
+ """
+ access_policy = await self._container_client.get_container_access_policy(**kwargs)
+ return {
+ 'public_access': PublicAccess._from_generated(access_policy['public_access']), # pylint: disable=protected-access
+ 'signed_identifiers': access_policy['signed_identifiers']
+ }
+
+ @distributed_trace
+ def get_paths(self, path=None, # type: Optional[str]
+ recursive=True, # type: Optional[bool]
+ max_results=None, # type: Optional[int]
+ **kwargs):
+ # type: (...) -> AsyncItemPaged[PathProperties]
+ """Returns a generator to list the paths(could be files or directories) under the specified file system.
+ The generator will lazily follow the continuation tokens returned by
+ the service.
+
+ :param str path:
+ Filters the results to return only paths under the specified path.
+ :param int max_results:
+ An optional value that specifies the maximum
+ number of items to return per page. If omitted or greater than 5,000, the
+ response will include up to 5,000 items per page.
+ :keyword upn:
+ Optional. Valid only when Hierarchical Namespace is
+ enabled for the account. If "true", the user identity values returned
+ in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+ transformed from Azure Active Directory Object IDs to User Principal
+ Names. If "false", the values will be returned as Azure Active
+ Directory Object IDs. The default value is false. Note that group and
+ application Object IDs are not translated because they do not have
+ unique friendly names.
+        :paramtype upn: bool
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An iterable (auto-paging) response of PathProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.PathProperties]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START get_paths_in_file_system]
+ :end-before: [END get_paths_in_file_system]
+ :language: python
+ :dedent: 12
+                :caption: List the paths in the file system.
+ """
+ timeout = kwargs.pop('timeout', None)
+ return self._client.file_system.list_paths(
+ recursive=recursive,
+ max_results=max_results,
+ path=path,
+ timeout=timeout,
+ cls=deserialize_path_properties,
+ **kwargs)
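+
+    # Usage sketch for get_paths, assuming `fs_client` is an authenticated async
+    # FileSystemClient (illustrative name). The returned pager is consumed with
+    # `async for`:
+    #
+    #     async for path in fs_client.get_paths(path="mydirectory", recursive=True):
+    #         print(path.name, path.is_directory)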
+
+ @distributed_trace_async
+ async def create_directory(self, directory, # type: Union[DirectoryProperties, str]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs):
+ # type: (...) -> DataLakeDirectoryClient
+ """
+ Create directory
+
+ :param directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :param metadata:
+ Name-value pairs associated with the file as metadata.
+ :type metadata: dict(str, str)
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword lease:
+ Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeDirectoryClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START create_directory_from_file_system]
+ :end-before: [END create_directory_from_file_system]
+ :language: python
+ :dedent: 12
+ :caption: Create directory in the file system.
+ """
+ directory_client = self.get_directory_client(directory)
+ await directory_client.create_directory(metadata=metadata, **kwargs)
+ return directory_client
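+
+    # Usage sketch for create_directory, assuming `fs_client` is an authenticated
+    # async FileSystemClient (illustrative name):
+    #
+    #     directory_client = await fs_client.create_directory("mydirectory")
+    #     await directory_client.create_sub_directory("subdirectory")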
+
+ @distributed_trace_async
+ async def delete_directory(self, directory, # type: Union[DirectoryProperties, str]
+ **kwargs):
+ # type: (...) -> DataLakeDirectoryClient
+ """
+ Marks the specified path for deletion.
+
+ :param directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :keyword lease:
+ Required if the file has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeDirectoryClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START delete_directory_from_file_system]
+ :end-before: [END delete_directory_from_file_system]
+ :language: python
+ :dedent: 12
+ :caption: Delete directory in the file system.
+ """
+ directory_client = self.get_directory_client(directory)
+ await directory_client.delete_directory(**kwargs)
+ return directory_client
+
+ @distributed_trace_async
+ async def create_file(self, file, # type: Union[FileProperties, str]
+ **kwargs):
+ # type: (...) -> DataLakeFileClient
+ """
+ Create file
+
+ :param file:
+ The file with which to interact. This can either be the name of the file,
+ or an instance of FileProperties.
+ :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: dict(str, str)
+ :keyword lease:
+ Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword str permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeFileClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START create_file_from_file_system]
+ :end-before: [END create_file_from_file_system]
+ :language: python
+ :dedent: 12
+ :caption: Create file in the file system.
+ """
+ file_client = self.get_file_client(file)
+ await file_client.create_file(**kwargs)
+ return file_client
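+
+    # Usage sketch for create_file, assuming `fs_client` is an authenticated async
+    # FileSystemClient (illustrative name):
+    #
+    #     file_client = await fs_client.create_file("mydirectory/myfile.txt")
+    #     await file_client.upload_data(b"hello", overwrite=True)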
+
+ @distributed_trace_async
+ async def delete_file(self, file, # type: Union[FileProperties, str]
+ **kwargs):
+ # type: (...) -> DataLakeFileClient
+ """
+ Marks the specified file for deletion.
+
+ :param file:
+ The file with which to interact. This can either be the name of the file,
+ or an instance of FileProperties.
+ :type file: str or ~azure.storage.filedatalake.FileProperties
+ :keyword lease:
+ Required if the file has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START delete_file_from_file_system]
+                :end-before: [END delete_file_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Delete file in the file system.
+ """
+ file_client = self.get_file_client(file)
+ await file_client.delete_file(**kwargs)
+ return file_client
+
+ @distributed_trace_async
+ async def _undelete_path(self, deleted_path_name, deletion_id, **kwargs):
+ # type: (str, str, **Any) -> Union[DataLakeDirectoryClient, DataLakeFileClient]
+ """Restores soft-deleted path.
+
+ Operation will only be successful if used within the specified number of days
+ set in the delete retention policy.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2020-06-12'.
+
+        :param str deleted_path_name:
+            Specifies the name of the deleted path to restore.
+        :param str deletion_id:
+            Specifies the version of the deleted path to restore.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+            or ~azure.storage.filedatalake.aio.DataLakeFileClient
+ """
+ _, url, undelete_source = self._undelete_path_options(deleted_path_name, deletion_id)
+
+ pipeline = AsyncPipeline(
+ transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ path_client = AzureDataLakeStorageRESTAPI(
+ url, filesystem=self.file_system_name, path=deleted_path_name, pipeline=pipeline)
+ try:
+ is_file = await path_client.path.undelete(undelete_source=undelete_source, cls=is_file_path, **kwargs)
+ if is_file:
+ return self.get_file_client(deleted_path_name)
+ return self.get_directory_client(deleted_path_name)
+ except HttpResponseError as error:
+ process_storage_error(error)
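+
+    # Usage sketch for _undelete_path, assuming `fs_client` is an authenticated async
+    # FileSystemClient (illustrative name) and `deletion_id` was captured from a
+    # previous list_deleted_paths() call. The returned client is a DataLakeFileClient
+    # or DataLakeDirectoryClient depending on what was restored:
+    #
+    #     restored_client = await fs_client._undelete_path("mydirectory/myfile", deletion_id)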
+
+ def _get_root_directory_client(self):
+ # type: () -> DataLakeDirectoryClient
+ """Get a client to interact with the root directory.
+
+ :returns: A DataLakeDirectoryClient.
+ :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+ """
+ return self.get_directory_client('/')
+
+ def get_directory_client(self, directory # type: Union[DirectoryProperties, str]
+ ):
+ # type: (...) -> DataLakeDirectoryClient
+ """Get a client to interact with the specified directory.
+
+ The directory need not already exist.
+
+ :param directory:
+ The directory with which to interact. This can either be the name of the directory,
+ or an instance of DirectoryProperties.
+ :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+ :returns: A DataLakeDirectoryClient.
+ :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START get_directory_client_from_file_system]
+ :end-before: [END get_directory_client_from_file_system]
+ :language: python
+ :dedent: 12
+ :caption: Getting the directory client to interact with a specific directory.
+ """
+ try:
+ directory_name = directory.get('name')
+ except AttributeError:
+ directory_name = str(directory)
+ _pipeline = AsyncPipeline(
+ transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name,
+ credential=self._raw_credential,
+ _configuration=self._config, _pipeline=_pipeline,
+ _hosts=self._hosts,
+ require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function,
+ loop=self._loop
+ )
+
+ def get_file_client(self, file_path # type: Union[FileProperties, str]
+ ):
+ # type: (...) -> DataLakeFileClient
+ """Get a client to interact with the specified file.
+
+ The file need not already exist.
+
+ :param file_path:
+            The file with which to interact. This can either be the path of the file (from the root directory),
+            e.g. directory/subdirectory/file, or an instance of FileProperties.
+ :type file_path: str or ~azure.storage.filedatalake.FileProperties
+ :returns: A DataLakeFileClient.
+ :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+ :start-after: [START get_file_client_from_file_system]
+ :end-before: [END get_file_client_from_file_system]
+ :language: python
+ :dedent: 12
+ :caption: Getting the file client to interact with a specific file.
+ """
+ try:
+ file_path = file_path.get('name')
+ except AttributeError:
+ file_path = str(file_path)
+ _pipeline = AsyncPipeline(
+ transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return DataLakeFileClient(
+ self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
+ require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function, loop=self._loop)
+
+ @distributed_trace
+ def list_deleted_paths(self, **kwargs):
+ # type: (Any) -> AsyncItemPaged[DeletedPathProperties]
+ """Returns a generator to list the deleted (file or directory) paths under the specified file system.
+ The generator will lazily follow the continuation tokens returned by
+ the service.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2020-06-12'.
+
+ :keyword str path_prefix:
+ Filters the results to return only paths under the specified path.
+ :keyword int max_results:
+ An optional value that specifies the maximum number of items to return per page.
+ If omitted or greater than 5,000, the response will include up to 5,000 items per page.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An iterable (auto-paging) response of DeletedPathProperties.
+ :rtype:
+            ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.DeletedPathProperties]
+ """
+ path_prefix = kwargs.pop('path_prefix', None)
+ results_per_page = kwargs.pop('max_results', None)
+ timeout = kwargs.pop('timeout', None)
+ command = functools.partial(
+ self._datalake_client_for_blob_operation.file_system.list_blob_hierarchy_segment,
+ showonly=ListBlobsIncludeItem.deleted,
+ timeout=timeout,
+ **kwargs)
+ return AsyncItemPaged(
+ command, prefix=path_prefix, page_iterator_class=DeletedPathPropertiesPaged,
+ results_per_page=results_per_page, **kwargs)
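+
+    # Usage sketch tying list_deleted_paths and _undelete_path together, assuming
+    # `fs_client` is an authenticated async FileSystemClient (illustrative name) on an
+    # account with soft delete enabled:
+    #
+    #     async for deleted in fs_client.list_deleted_paths(path_prefix="mydirectory/"):
+    #         await fs_client._undelete_path(deleted.name, deleted.deletion_id)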
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_list_paths_helper.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_list_paths_helper.py
new file mode 100644
index 00000000000..03831a5ecc5
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_list_paths_helper.py
@@ -0,0 +1,111 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+from azure.core.exceptions import HttpResponseError
+from azure.core.async_paging import AsyncPageIterator
+
+from .._deserialize import process_storage_error, get_deleted_path_properties_from_generated_code
+from .._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix
+
+from .._shared.models import DictMixin
+from .._shared.response_handlers import return_context_and_deserialized
+
+
+class DeletedPathPropertiesPaged(AsyncPageIterator):
+ """An Iterable of deleted path properties.
+
+ :ivar str service_endpoint: The service URL.
+ :ivar str prefix: A path name prefix being used to filter the list.
+ :ivar str marker: The continuation token of the current page of results.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str continuation_token: The continuation token to retrieve the next page of results.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar current_page: The current page of listed results.
+ :vartype current_page: list(~azure.storage.filedatalake.DeletedPathProperties)
+ :ivar str container: The container that the paths are listed from.
+ :ivar str delimiter: A delimiting character used for hierarchy listing.
+
+ :param callable command: Function to retrieve the next page of items.
+ """
+ def __init__(
+ self, command,
+ container=None,
+ prefix=None,
+ results_per_page=None,
+ continuation_token=None,
+ delimiter=None,
+ location_mode=None):
+ super(DeletedPathPropertiesPaged, self).__init__(
+ get_next=self._get_next_cb,
+ extract_data=self._extract_data_cb,
+ continuation_token=continuation_token or ""
+ )
+ self._command = command
+ self.service_endpoint = None
+ self.prefix = prefix
+ self.marker = None
+ self.results_per_page = results_per_page
+ self.container = container
+ self.delimiter = delimiter
+ self.current_page = None
+ self.location_mode = location_mode
+
+ async def _get_next_cb(self, continuation_token):
+ try:
+ return await self._command(
+ prefix=self.prefix,
+ marker=continuation_token or None,
+ max_results=self.results_per_page,
+ cls=return_context_and_deserialized,
+ use_location=self.location_mode)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ async def _extract_data_cb(self, get_next_return):
+ self.location_mode, self._response = get_next_return
+ self.service_endpoint = self._response.service_endpoint
+ self.prefix = self._response.prefix
+ self.marker = self._response.marker
+ self.results_per_page = self._response.max_results
+ self.container = self._response.container_name
+ self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items
+ self.current_page = [self._build_item(item) for item in self.current_page]
+ self.delimiter = self._response.delimiter
+
+ return self._response.next_marker or None, self.current_page
+
+ def _build_item(self, item):
+ if isinstance(item, BlobItemInternal):
+ file_props = get_deleted_path_properties_from_generated_code(item)
+ file_props.file_system = self.container
+ return file_props
+ if isinstance(item, GenBlobPrefix):
+ return DirectoryPrefix(
+ container=self.container,
+ prefix=item.name,
+ results_per_page=self.results_per_page,
+ location_mode=self.location_mode)
+ return item
+
+
+class DirectoryPrefix(DictMixin):
+ """Directory prefix.
+
+ :ivar str name: Name of the deleted directory.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar str file_system: The file system that the deleted paths are listed from.
+ :ivar str delimiter: A delimiting character used for hierarchy listing.
+ """
+ def __init__(self, **kwargs):
+ self.name = kwargs.get('prefix')
+ self.results_per_page = kwargs.get('results_per_page')
+ self.file_system = kwargs.get('container')
+ self.delimiter = kwargs.get('delimiter')
+ self.location_mode = kwargs.get('location_mode')
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_models.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_models.py
new file mode 100644
index 00000000000..50cc03bda9e
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_models.py
@@ -0,0 +1,41 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+from ....blob.aio._models import ContainerPropertiesPaged
+from .._models import FileSystemProperties
+
+
+class FileSystemPropertiesPaged(ContainerPropertiesPaged):
+ """An Iterable of File System properties.
+
+ :ivar str service_endpoint: The service URL.
+ :ivar str prefix: A file system name prefix being used to filter the list.
+ :ivar str marker: The continuation token of the current page of results.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str continuation_token: The continuation token to retrieve the next page of results.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar current_page: The current page of listed results.
+ :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties)
+
+ :param callable command: Function to retrieve the next page of items.
+ :param str prefix: Filters the results to return only file systems whose names
+ begin with the specified prefix.
+ :param int results_per_page: The maximum number of file system names to retrieve per
+ call.
+ :param str continuation_token: An opaque continuation token.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(FileSystemPropertiesPaged, self).__init__(
+ *args,
+ **kwargs
+ )
+
+ @staticmethod
+ def _build_item(item):
+ return FileSystemProperties._from_generated(item) # pylint: disable=protected-access
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_path_client_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_path_client_async.py
new file mode 100644
index 00000000000..f8344007bb5
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_path_client_async.py
@@ -0,0 +1,727 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+from datetime import datetime
+from typing import Any, Dict, Union
+
+from azure.core.exceptions import AzureError, HttpResponseError
+from ....blob.aio import BlobClient
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin
+from .._path_client import PathClient as PathClientBase
+from .._models import DirectoryProperties, AccessControlChangeResult, AccessControlChangeFailure, \
+ AccessControlChangeCounters, AccessControlChanges
+from .._generated.aio import AzureDataLakeStorageRESTAPI
+from ._data_lake_lease_async import DataLakeLeaseClient
+from .._deserialize import process_storage_error
+from .._shared.policies_async import ExponentialRetry
+
+_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = (
+ 'The require_encryption flag is set, but encryption is not supported'
+ ' for this method.')
+
+
+class PathClient(AsyncStorageAccountHostsMixin, PathClientBase):
+ def __init__(
+ self, account_url, # type: str
+ file_system_name, # type: str
+ path_name, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+
+ super(PathClient, self).__init__(account_url, # pylint: disable=specify-parameter-names-in-call
+ file_system_name, path_name,
+ credential=credential,
+ **kwargs) # type: ignore
+
+ kwargs.pop('_hosts', None)
+
+ self._blob_client = BlobClient(account_url=self._blob_account_url, container_name=file_system_name,
+ blob_name=path_name,
+ credential=credential,
+ _hosts=self._blob_client._hosts, # pylint: disable=protected-access
+ **kwargs)
+
+ self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, path=path_name,
+ pipeline=self._pipeline)
+ self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._blob_client.url,
+ file_system=file_system_name,
+ path=path_name,
+ pipeline=self._pipeline)
+ self._loop = kwargs.get('loop', None)
+
+ async def __aexit__(self, *args):
+ await self._blob_client.close()
+ await super(PathClient, self).__aexit__(*args)
+
+ async def close(self):
+ # type: () -> None
+ """ This method is to close the sockets opened by the client.
+ It need not be used when using with a context manager.
+ """
+ await self._blob_client.close()
+ await self.__aexit__()
+
+ async def _create(self, resource_type, content_settings=None, metadata=None, **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """
+ Create directory or file
+
+ :param resource_type:
+ Required for Create File and Create Directory.
+ The value must be "file" or "directory". Possible values include:
+ 'directory', 'file'
+ :type resource_type: str
+ :param ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :param metadata:
+ Name-value pairs associated with the file/directory as metadata.
+ :type metadata: dict(str, str)
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :type permissions: str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Dict[str, Union[str, datetime]]
+ """
+ options = self._create_path_options(
+ resource_type,
+ content_settings=content_settings,
+ metadata=metadata,
+ **kwargs)
+ try:
+ return await self._client.path.create(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ async def _delete(self, **kwargs):
+        # type: (**Any) -> Dict[str, Union[datetime, str]]
+ """
+ Marks the specified path for deletion.
+
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ options = self._delete_path_options(**kwargs)
+ try:
+ return await self._client.path.delete(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ async def set_access_control(self, owner=None, # type: Optional[str]
+ group=None, # type: Optional[str]
+ permissions=None, # type: Optional[str]
+ acl=None, # type: Optional[str]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """
+ Set the owner, group, permissions, or access control list for a path.
+
+ :param owner:
+ Optional. The owner of the file or directory.
+ :type owner: str
+ :param group:
+ Optional. The owning group of the file or directory.
+ :type group: str
+ :param permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ permissions and acl are mutually exclusive.
+ :type permissions: str
+ :param acl:
+ Sets POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, a user or
+ group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ permissions and acl are mutually exclusive.
+ :type acl: str
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+        :returns: response dict (Etag and last modified).
+ """
+ options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs)
+ try:
+ return await self._client.path.set_access_control(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
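+
+    # Usage sketch for set_access_control, assuming `path_client` is an authenticated
+    # async PathClient (illustrative name). The ACL string below grants full access to
+    # the owner, read/execute to the owning group, and no access to others:
+    #
+    #     await path_client.set_access_control(acl="user::rwx,group::r-x,other::---")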
+
+ async def get_access_control(self, upn=None, # type: Optional[bool]
+ **kwargs):
+ # type: (...) -> Dict[str, Any]
+ """
+ Get the owner, group, permissions, or access control list for a path.
+
+ :param upn:
+ Optional. Valid only when Hierarchical Namespace is
+ enabled for the account. If "true", the user identity values returned
+ in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+ transformed from Azure Active Directory Object IDs to User Principal
+ Names. If "false", the values will be returned as Azure Active
+ Directory Object IDs. The default value is false. Note that group and
+ application Object IDs are not translated because they do not have
+ unique friendly names.
+ :type upn: bool
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+        :returns: response dict.
+ """
+ options = self._get_access_control_options(upn=upn, **kwargs)
+ try:
+ return await self._client.path.get_properties(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ async def set_access_control_recursive(self,
+ acl,
+ **kwargs):
+ # type: (str, **Any) -> AccessControlChangeResult
+ """
+ Sets the Access Control on a path and sub-paths.
+
+ :param acl:
+ Sets POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, a user or
+ group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+ Callback where the caller can track progress of the operation
+ as well as collect paths that failed to change Access Control.
+ :keyword str continuation_token:
+ Optional continuation token that can be used to resume previously stopped operation.
+ :keyword int batch_size:
+            Optional. If the data set size exceeds the batch size, the operation will be split into multiple
+            requests so that progress can be tracked. The batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control operation can execute.
+            If the maximum is reached before all sub-paths are processed,
+            then the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded and the operation continues to the end.
+ :keyword bool continue_on_failure:
+ If set to False, the operation will terminate quickly on encountering user errors (4XX).
+ If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+ the directory.
+            A continuation token will only be returned when continue_on_failure is True in case of user errors.
+            If not set, the default value is False.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: A summary of the recursive operations, including the count of successes and failures,
+ as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+ :raises ~azure.core.exceptions.AzureError:
+ User can restart the operation using continuation_token field of AzureError if the token is available.
+ """
+ if not acl:
+ raise ValueError("The Access Control List must be set for this operation")
+
+ progress_hook = kwargs.pop('progress_hook', None)
+ max_batches = kwargs.pop('max_batches', None)
+ options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs)
+ return await self._set_access_control_internal(options=options, progress_hook=progress_hook,
+ max_batches=max_batches)
+
+ async def update_access_control_recursive(self, acl, **kwargs):
+ # type: (str, **Any) -> AccessControlChangeResult
+ """
+ Modifies the Access Control on a path and sub-paths.
+
+ :param acl:
+ Modifies POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, a user or
+ group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+ Callback where the caller can track progress of the operation
+ as well as collect paths that failed to change Access Control.
+ :keyword str continuation_token:
+ Optional continuation token that can be used to resume previously stopped operation.
+ :keyword int batch_size:
+            Optional. If the data set size exceeds the batch size, the operation will be split into multiple
+            requests so that progress can be tracked. The batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control operation can execute.
+            If the maximum is reached before all sub-paths are processed,
+            then the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded and the operation continues to the end.
+ :keyword bool continue_on_failure:
+ If set to False, the operation will terminate quickly on encountering user errors (4XX).
+ If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+ the directory.
+            A continuation token will only be returned when continue_on_failure is True in case of user errors.
+            If not set, the default value is False.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: A summary of the recursive operations, including the count of successes and failures,
+ as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+ :raises ~azure.core.exceptions.AzureError:
+ User can restart the operation using continuation_token field of AzureError if the token is available.
+ """
+ if not acl:
+ raise ValueError("The Access Control List must be set for this operation")
+
+ progress_hook = kwargs.pop('progress_hook', None)
+ max_batches = kwargs.pop('max_batches', None)
+ options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs)
+ return await self._set_access_control_internal(options=options, progress_hook=progress_hook,
+ max_batches=max_batches)
+
+ async def remove_access_control_recursive(self,
+ acl,
+ **kwargs):
+ # type: (str, **Any) -> AccessControlChangeResult
+ """
+ Removes the Access Control on a path and sub-paths.
+
+ :param acl:
+ Removes POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, and a user or
+ group identifier in the format "[scope:][type]:[id]".
+ :type acl: str
+ :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+ Callback where the caller can track progress of the operation
+ as well as collect paths that failed to change Access Control.
+ :keyword str continuation_token:
+ Optional continuation token that can be used to resume previously stopped operation.
+ :keyword int batch_size:
+            Optional. If the data set size exceeds the batch size, the operation will be split into multiple
+            requests so that progress can be tracked. The batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control operation can execute.
+            If the maximum is reached before all sub-paths are processed,
+            then the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded and the operation continues to the end.
+ :keyword bool continue_on_failure:
+ If set to False, the operation will terminate quickly on encountering user errors (4XX).
+ If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+ the directory.
+            A continuation token will only be returned when continue_on_failure is True in case of user errors.
+            If not set, the default value is False.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: A summary of the recursive operations, including the count of successes and failures,
+ as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+ :raises ~azure.core.exceptions.AzureError:
+ User can restart the operation using continuation_token field of AzureError if the token is available.
+ """
+ if not acl:
+ raise ValueError("The Access Control List must be set for this operation")
+
+ progress_hook = kwargs.pop('progress_hook', None)
+ max_batches = kwargs.pop('max_batches', None)
+ options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs)
+ return await self._set_access_control_internal(options=options, progress_hook=progress_hook,
+ max_batches=max_batches)
+
+ async def _set_access_control_internal(self, options, progress_hook, max_batches=None):
+ try:
+ continue_on_failure = options.get('force_flag')
+ total_directories_successful = 0
+ total_files_success = 0
+ total_failure_count = 0
+ batch_count = 0
+ last_continuation_token = None
+ current_continuation_token = None
+ continue_operation = True
+ while continue_operation:
+ headers, resp = await self._client.path.set_access_control_recursive(**options)
+
+ # make a running tally so that we can report the final results
+ total_directories_successful += resp.directories_successful
+ total_files_success += resp.files_successful
+ total_failure_count += resp.failure_count
+ batch_count += 1
+ current_continuation_token = headers['continuation']
+
+ if current_continuation_token is not None:
+ last_continuation_token = current_continuation_token
+
+ if progress_hook is not None:
+ await progress_hook(AccessControlChanges(
+ batch_counters=AccessControlChangeCounters(
+ directories_successful=resp.directories_successful,
+ files_successful=resp.files_successful,
+ failure_count=resp.failure_count,
+ ),
+ aggregate_counters=AccessControlChangeCounters(
+ directories_successful=total_directories_successful,
+ files_successful=total_files_success,
+ failure_count=total_failure_count,
+ ),
+ batch_failures=[AccessControlChangeFailure(
+ name=failure.name,
+ is_directory=failure.type == 'DIRECTORY',
+ error_message=failure.error_message) for failure in resp.failed_entries],
+ continuation=last_continuation_token))
+
+ # update the continuation token, if there are more operations that cannot be completed in a single call
+ max_batches_satisfied = (max_batches is not None and batch_count == max_batches)
+ continue_operation = bool(current_continuation_token) and not max_batches_satisfied
+ options['continuation'] = current_continuation_token
+
+ # currently the service stops on any failure, so we should send back the last continuation token
+ # for the user to retry the failed updates
+ # otherwise we should just return what the service gave us
+ return AccessControlChangeResult(counters=AccessControlChangeCounters(
+ directories_successful=total_directories_successful,
+ files_successful=total_files_success,
+ failure_count=total_failure_count),
+ continuation=last_continuation_token
+ if total_failure_count > 0 and not continue_on_failure else current_continuation_token)
+ except HttpResponseError as error:
+ error.continuation_token = last_continuation_token
+ process_storage_error(error)
+ except AzureError as error:
+ error.continuation_token = last_continuation_token
+ raise error
+
+ async def _rename_path(self, rename_source, **kwargs):
+ # type: (str, **Any) -> Dict[str, Any]
+ """
+ Rename directory or file
+
+ :param rename_source: The value must have the following format: "/{filesystem}/{path}".
+ :type rename_source: str
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword source_lease: A lease ID for the source path. If specified,
+ the source path must have an active lease and the lease ID must
+ match.
+ :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ """
+ options = self._rename_path_options(
+ rename_source,
+ **kwargs)
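+ # The rename is issued as a Create call on the destination path; the source path is
+ # carried in the rename options assembled above.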
+ try:
+ return await self._client.path.create(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ async def _get_path_properties(self, **kwargs):
+ # type: (**Any) -> Union[FileProperties, DirectoryProperties]
+ """Returns all user-defined metadata, standard HTTP properties, and
+ system properties for the file or directory. It does not return the content of the directory or file.
+
+ :keyword lease:
+ Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: DirectoryProperties or FileProperties
+ """
+ path_properties = await self._blob_client.get_blob_properties(**kwargs)
+ return path_properties
+
+ async def _exists(self, **kwargs):
+ # type: (**Any) -> bool
+ """
+ Returns True if a path exists and returns False otherwise.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: True if the path exists, False otherwise.
+ """
+ return await self._blob_client.exists(**kwargs)
+
+ async def set_metadata(self, metadata, # type: Dict[str, str]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets one or more user-defined name-value pairs for the specified
+ file system. Each call to this operation replaces all existing metadata
+ attached to the file system. To remove all metadata from the file system,
+ call this operation with no metadata dict.
+
+ :param metadata:
+ A dict containing name-value pairs to associate with the file system as
+ metadata. Example: {'category':'test'}
+ :type metadata: dict[str, str]
+ :keyword lease:
+ If specified, set_metadata only succeeds if the
+ file's or directory's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: file/directory-updated property dict (Etag and last modified).
+ """
+ return await self._blob_client.set_blob_metadata(metadata=metadata, **kwargs)
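+ # Hypothetical usage sketch (path_client construction is an assumption, not shown here):
+ #   await path_client.set_metadata(metadata={'category': 'test'})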
+
+ async def set_http_headers(self, content_settings=None, # type: Optional[ContentSettings]
+ **kwargs):
+ # type: (...) -> Dict[str, Any]
+ """Sets system properties on the file or directory.
+
+ If one property is set for the content_settings, all properties will be overridden.
+
+ :param ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set file/directory properties.
+ :keyword lease:
+ If specified, set_http_headers only succeeds if the
+ file's or directory's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: file/directory-updated property dict (Etag and last modified)
+ :rtype: Dict[str, Any]
+ """
+ return await self._blob_client.set_http_headers(content_settings=content_settings, **kwargs)
+
+ async def acquire_lease(self, lease_duration=-1, # type: Optional[int]
+ lease_id=None, # type: Optional[str]
+ **kwargs):
+ # type: (...) -> DataLakeLeaseClient
+ """
+ Requests a new lease. If the file or directory does not have an active lease,
+ the DataLake service creates a lease on the file/directory and returns a new
+ lease ID.
+
+ :param int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires. A non-infinite lease can be
+ between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change. Default is -1 (infinite lease).
+ :param str lease_id:
+ Proposed lease ID, in a GUID string format. The DataLake service returns
+ 400 (Invalid request) if the proposed lease ID is not in the correct format.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A DataLakeLeaseClient object that can be used in a context manager.
+ :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/test_file_system_samples.py
+ :start-after: [START acquire_lease_on_file_system]
+ :end-before: [END acquire_lease_on_file_system]
+ :language: python
+ :dedent: 8
+ :caption: Acquiring a lease on the file_system.
+ """
+ lease = DataLakeLeaseClient(self, lease_id=lease_id) # type: ignore
+ await lease.acquire(lease_duration=lease_duration, **kwargs)
+ return lease
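+ # Hypothetical usage sketch (path_client construction is an assumption, not shown here):
+ #   lease = await path_client.acquire_lease(lease_duration=15)
+ #   try:
+ #       ...  # operate on the path while the lease is held
+ #   finally:
+ #       await lease.release()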
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_upload_helper.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_upload_helper.py
new file mode 100644
index 00000000000..00d5bf1ecfd
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/aio/_upload_helper.py
@@ -0,0 +1,103 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+from azure.core.exceptions import HttpResponseError
+from .._deserialize import (
+ process_storage_error)
+from .._shared.response_handlers import return_response_headers
+from .._shared.uploads_async import (
+ upload_data_chunks,
+ DataLakeFileChunkUploader, upload_substream_blocks)
+
+
+def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument
+ return any([
+ modified_access_conditions.if_modified_since,
+ modified_access_conditions.if_unmodified_since,
+ modified_access_conditions.if_none_match,
+ modified_access_conditions.if_match
+ ])
+
+
+async def upload_datalake_file( # pylint: disable=unused-argument
+ client=None,
+ stream=None,
+ length=None,
+ overwrite=None,
+ validate_content=None,
+ max_concurrency=None,
+ file_settings=None,
+ **kwargs):
+ try:
+ if length == 0:
+ return {}
+ properties = kwargs.pop('properties', None)
+ umask = kwargs.pop('umask', None)
+ permissions = kwargs.pop('permissions', None)
+ path_http_headers = kwargs.pop('path_http_headers', None)
+ modified_access_conditions = kwargs.pop('modified_access_conditions', None)
+ chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024)
+
+ if not overwrite:
+ # if customers didn't specify access conditions, they cannot flush data to existing file
+ if not _any_conditions(modified_access_conditions):
+ modified_access_conditions.if_none_match = '*'
+ if properties or umask or permissions:
+ raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled")
+
+ if overwrite:
+ response = await client.create(
+ resource='file',
+ path_http_headers=path_http_headers,
+ properties=properties,
+ modified_access_conditions=modified_access_conditions,
+ umask=umask,
+ permissions=permissions,
+ cls=return_response_headers,
+ **kwargs)
+
+ # this modified_access_conditions will be applied to flush_data to make sure
+ # no other flush between create and the current flush
+ modified_access_conditions.if_match = response['etag']
+ modified_access_conditions.if_none_match = None
+ modified_access_conditions.if_modified_since = None
+ modified_access_conditions.if_unmodified_since = None
+
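+ # Fall back to the fully buffered chunk uploader when the stream is not seekable, when
+ # content validation (MD5) is requested, or when the chunk size is below the substream
+ # threshold; otherwise seekable streams are uploaded via substream blocks.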
+ use_original_upload_path = file_settings.use_byte_buffer or \
+ validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \
+ hasattr(stream, 'seekable') and not stream.seekable() or \
+ not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
+
+ if use_original_upload_path:
+ await upload_data_chunks(
+ service=client,
+ uploader_class=DataLakeFileChunkUploader,
+ total_size=length,
+ chunk_size=chunk_size,
+ stream=stream,
+ max_concurrency=max_concurrency,
+ validate_content=validate_content,
+ **kwargs)
+ else:
+ await upload_substream_blocks(
+ service=client,
+ uploader_class=DataLakeFileChunkUploader,
+ total_size=length,
+ chunk_size=chunk_size,
+ max_concurrency=max_concurrency,
+ stream=stream,
+ validate_content=validate_content,
+ **kwargs
+ )
+
+ return await client.flush_data(position=length,
+ path_http_headers=path_http_headers,
+ modified_access_conditions=modified_access_conditions,
+ close=True,
+ cls=return_response_headers,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/py.typed b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage_filedatalake/v2020_06_12/py.typed
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storagev2/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storagev2/__init__.py
index 56200e1b95c..77845c19798 100644
--- a/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storagev2/__init__.py
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/azure_storagev2/__init__.py
@@ -1 +1,6 @@
-__path__ = __import__('pkgutil').extend_path(__path__, __name__)
\ No newline at end of file
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+__import__('pkg_resources').declare_namespace(__name__)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/__init__.py
new file mode 100644
index 00000000000..937d74b5403
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/__init__.py
@@ -0,0 +1,229 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import os
+
+from typing import Union, Iterable, AnyStr, IO, Any, Dict # pylint: disable=unused-import
+from ._version import VERSION
+from ._blob_client import BlobClient
+from ._container_client import ContainerClient
+from ._blob_service_client import BlobServiceClient
+from ._lease import BlobLeaseClient
+from ._download import StorageStreamDownloader
+from ._quick_query_helper import BlobQueryReader
+from ._shared_access_signature import generate_account_sas, generate_container_sas, generate_blob_sas
+from ._shared.policies import ExponentialRetry, LinearRetry
+from ._shared.response_handlers import PartialBatchErrorException
+from ._shared.models import (
+ LocationMode,
+ ResourceTypes,
+ AccountSasPermissions,
+ StorageErrorCode,
+ UserDelegationKey
+)
+from ._generated.models import (
+ RehydratePriority
+)
+from ._models import (
+ BlobType,
+ BlockState,
+ StandardBlobTier,
+ PremiumPageBlobTier,
+ SequenceNumberAction,
+ PublicAccess,
+ BlobAnalyticsLogging,
+ Metrics,
+ RetentionPolicy,
+ StaticWebsite,
+ CorsRule,
+ ContainerProperties,
+ BlobProperties,
+ FilteredBlob,
+ LeaseProperties,
+ ContentSettings,
+ CopyProperties,
+ BlobBlock,
+ PageRange,
+ AccessPolicy,
+ ContainerSasPermissions,
+ BlobSasPermissions,
+ CustomerProvidedEncryptionKey,
+ ContainerEncryptionScope,
+ BlobQueryError,
+ DelimitedJsonDialect,
+ DelimitedTextDialect,
+ ArrowDialect,
+ ArrowType,
+ ObjectReplicationPolicy,
+ ObjectReplicationRule
+)
+from ._list_blobs_helper import BlobPrefix
+
+__version__ = VERSION
+
+
+def upload_blob_to_url(
+ blob_url, # type: str
+ data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
+ credential=None, # type: Any
+ **kwargs):
+ # type: (...) -> Dict[str, Any]
+ """Upload data to a given URL
+
+ The data will be uploaded as a block blob.
+
+ :param str blob_url:
+ The full URI to the blob. This can also include a SAS token.
+ :param data:
+ The data to upload. This can be bytes, text, an iterable or a file-like object.
+ :type data: bytes or str or Iterable
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ blob URL already has a SAS token. The value can be a SAS token string, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the URL already has a SAS token, specifying an explicit credential will take priority.
+ :keyword bool overwrite:
+ Whether the blob to be uploaded should overwrite the current data.
+ If True, upload_blob_to_url will overwrite any existing data. If set to False, the
+ operation will fail with a ResourceExistsError.
+ :keyword int max_concurrency:
+ The number of parallel connections with which to upload.
+ :keyword int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :keyword dict(str,str) metadata:
+ Name-value pairs associated with the blob as metadata.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the blob. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https as https (the default) will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient upload algorithm
+ will not be used, because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword str encoding:
+ Encoding to use if text is supplied as input. Defaults to UTF-8.
+ :returns: Blob-updated property dict (Etag and last modified)
+ :rtype: dict(str, Any)
+ """
+ with BlobClient.from_blob_url(blob_url, credential=credential) as client:
+ return client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs)
+
+
+def _download_to_stream(client, handle, **kwargs):
+ """Download data to specified open file-handle."""
+ stream = client.download_blob(**kwargs)
+ stream.readinto(handle)
+
+
+def download_blob_from_url(
+ blob_url, # type: str
+ output, # type: str
+ credential=None, # type: Any
+ **kwargs):
+ # type: (...) -> None
+ """Download the contents of a blob to a local file or stream.
+
+ :param str blob_url:
+ The full URI to the blob. This can also include a SAS token.
+ :param output:
+ Where the data should be downloaded to. This could be either a file path to write to,
+ or an open IO handle to write to.
+ :type output: str or writable stream.
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ blob URL already has a SAS token or the blob is public. The value can be a SAS token string,
+ an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the URL already has a SAS token, specifying an explicit credential will take priority.
+ :keyword bool overwrite:
+ Whether the local file should be overwritten if it already exists. The default value is
+ `False` - in which case a ValueError will be raised if the file already exists. If set to
+ `True`, an attempt will be made to write to the existing file. If a stream handle is passed
+ in, this value is ignored.
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :keyword int offset:
+ Start of byte range to use for downloading a section of the blob.
+ Must be set if length is provided.
+ :keyword int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the blob. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https as https (the default) will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient upload algorithm
+ will not be used, because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :rtype: None
+ """
+ overwrite = kwargs.pop('overwrite', False)
+ with BlobClient.from_blob_url(blob_url, credential=credential) as client:
+ if hasattr(output, 'write'):
+ _download_to_stream(client, output, **kwargs)
+ else:
+ if not overwrite and os.path.isfile(output):
+ raise ValueError("The file '{}' already exists.".format(output))
+ with open(output, 'wb') as file_handle:
+ _download_to_stream(client, file_handle, **kwargs)
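+# Hypothetical usage sketch for the module-level helpers (URLs and SAS tokens are placeholders):
+#   upload_blob_to_url("https://<account>.blob.core.windows.net/<container>/<blob>?<sas>", b"hello")
+#   download_blob_from_url("https://<account>.blob.core.windows.net/<container>/<blob>?<sas>", "./out.bin")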
+
+
+__all__ = [
+ 'upload_blob_to_url',
+ 'download_blob_from_url',
+ 'BlobServiceClient',
+ 'ContainerClient',
+ 'BlobClient',
+ 'BlobType',
+ 'BlobLeaseClient',
+ 'StorageErrorCode',
+ 'UserDelegationKey',
+ 'ExponentialRetry',
+ 'LinearRetry',
+ 'LocationMode',
+ 'BlockState',
+ 'StandardBlobTier',
+ 'PremiumPageBlobTier',
+ 'SequenceNumberAction',
+ 'PublicAccess',
+ 'BlobAnalyticsLogging',
+ 'Metrics',
+ 'RetentionPolicy',
+ 'StaticWebsite',
+ 'CorsRule',
+ 'ContainerProperties',
+ 'BlobProperties',
+ 'BlobPrefix',
+ 'FilteredBlob',
+ 'LeaseProperties',
+ 'ContentSettings',
+ 'CopyProperties',
+ 'BlobBlock',
+ 'PageRange',
+ 'AccessPolicy',
+ 'ContainerSasPermissions',
+ 'BlobSasPermissions',
+ 'ResourceTypes',
+ 'AccountSasPermissions',
+ 'StorageStreamDownloader',
+ 'CustomerProvidedEncryptionKey',
+ 'RehydratePriority',
+ 'generate_account_sas',
+ 'generate_container_sas',
+ 'generate_blob_sas',
+ 'PartialBatchErrorException',
+ 'ContainerEncryptionScope',
+ 'BlobQueryError',
+ 'DelimitedJsonDialect',
+ 'DelimitedTextDialect',
+ 'ArrowDialect',
+ 'ArrowType',
+ 'BlobQueryReader',
+ 'ObjectReplicationPolicy',
+ 'ObjectReplicationRule'
+]
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_blob_client.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_blob_client.py
new file mode 100644
index 00000000000..fd3c4a68bed
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_blob_client.py
@@ -0,0 +1,3579 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines,no-self-use
+
+from io import BytesIO
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple,
+ TYPE_CHECKING
+)
+try:
+ from urllib.parse import urlparse, quote, unquote
+except ImportError:
+ from urlparse import urlparse # type: ignore
+ from urllib2 import quote, unquote # type: ignore
+
+import six
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.exceptions import ResourceNotFoundError
+
+from ._shared import encode_base64
+from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query
+from ._shared.encryption import generate_blob_encryption_data
+from ._shared.uploads import IterStreamer
+from ._shared.request_handlers import (
+ add_metadata_headers, get_length, read_length,
+ validate_and_format_range_headers)
+from ._shared.response_handlers import return_response_headers, process_storage_error, return_headers_and_deserialized
+from ._generated import AzureBlobStorage, VERSION
+from ._generated.models import ( # pylint: disable=unused-import
+ DeleteSnapshotsOptionType,
+ BlobHTTPHeaders,
+ BlockLookupList,
+ AppendPositionAccessConditions,
+ SequenceNumberAccessConditions,
+ StorageErrorException,
+ QueryRequest,
+ CpkInfo)
+from ._serialize import (
+ get_modify_conditions,
+ get_source_conditions,
+ get_cpk_scope_info,
+ get_api_version,
+ serialize_blob_tags_header,
+ serialize_blob_tags,
+ serialize_query_format, get_access_conditions
+)
+from ._deserialize import get_page_ranges_result, deserialize_blob_properties, deserialize_blob_stream, parse_tags
+from ._quick_query_helper import BlobQueryReader
+from ._upload_helpers import (
+ upload_block_blob,
+ upload_append_blob,
+ upload_page_blob)
+from ._models import BlobType, BlobBlock, BlobProperties, BlobQueryError
+from ._download import StorageStreamDownloader
+from ._lease import BlobLeaseClient
+
+if TYPE_CHECKING:
+ from datetime import datetime
+ from ._generated.models import BlockList
+ from ._models import ( # pylint: disable=unused-import
+ ContentSettings,
+ PremiumPageBlobTier,
+ StandardBlobTier,
+ SequenceNumberAction
+ )
+
+_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = (
+ 'The require_encryption flag is set, but encryption is not supported'
+ ' for this method.')
+
+
+class BlobClient(StorageAccountHostsMixin): # pylint: disable=too-many-public-methods
+ """A client to interact with a specific blob, although that blob may not yet exist.
+
+ :param str account_url:
+ The URI to the storage account. In order to create a client given the full URI to the blob,
+ use the :func:`from_blob_url` classmethod.
+ :param container_name: The container name for the blob.
+ :type container_name: str
+ :param blob_name: The name of the blob with which to interact. If specified, this value will override
+ a blob value specified in the blob URL.
+ :type blob_name: str
+ :param str snapshot:
+ The optional blob snapshot on which to operate. This can be the snapshot ID string
+ or the response returned from :func:`create_snapshot`.
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the URL already has a SAS token, specifying an explicit credential will take priority.
+ :keyword str api_version:
+ The Storage API version to use for requests. Default value is '2019-07-07'.
+ Setting to an older version may result in reduced feature compatibility.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str secondary_hostname:
+ The hostname of the secondary endpoint.
+ :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+ Defaults to 4*1024*1024, or 4MB.
+ :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+ uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+ the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+ :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+ algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+ :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+ :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+ :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
+ the part exceeding it will be downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB.
+ :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+ or 4MB.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_authentication.py
+ :start-after: [START create_blob_client]
+ :end-before: [END create_blob_client]
+ :language: python
+ :dedent: 8
+ :caption: Creating the BlobClient from a URL to a public blob (no auth needed).
+
+ .. literalinclude:: ../samples/blob_samples_authentication.py
+ :start-after: [START create_blob_client_sas_url]
+ :end-before: [END create_blob_client_sas_url]
+ :language: python
+ :dedent: 8
+ :caption: Creating the BlobClient from a SAS URL to a blob.
+ """
+ def __init__(
+ self, account_url, # type: str
+ container_name, # type: str
+ blob_name, # type: str
+ snapshot=None, # type: Optional[Union[str, Dict[str, Any]]]
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ try:
+ if not account_url.lower().startswith('http'):
+ account_url = "https://" + account_url
+ except AttributeError:
+ raise ValueError("Account URL must be a string.")
+ parsed_url = urlparse(account_url.rstrip('/'))
+
+ if not (container_name and blob_name):
+ raise ValueError("Please specify a container name and blob name.")
+ if not parsed_url.netloc:
+ raise ValueError("Invalid URL: {}".format(account_url))
+
+ path_snapshot, sas_token = parse_query(parsed_url.query)
+
+ self.container_name = container_name
+ self.blob_name = blob_name
+ try:
+ self.snapshot = snapshot.snapshot # type: ignore
+ except AttributeError:
+ try:
+ self.snapshot = snapshot['snapshot'] # type: ignore
+ except TypeError:
+ self.snapshot = snapshot or path_snapshot
+
+ self._query_str, credential = self._format_query_string(sas_token, credential, snapshot=self.snapshot)
+ super(BlobClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
+ self._client = AzureBlobStorage(self.url, pipeline=self._pipeline)
+ self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access
+
+ def _format_url(self, hostname):
+ container_name = self.container_name
+ if isinstance(container_name, six.text_type):
+ container_name = container_name.encode('UTF-8')
+ return "{}://{}/{}/{}{}".format(
+ self.scheme,
+ hostname,
+ quote(container_name),
+ quote(self.blob_name, safe='~/'),
+ self._query_str)
+
+ def _encode_source_url(self, source_url):
+ parsed_source_url = urlparse(source_url)
+ source_scheme = parsed_source_url.scheme
+ source_hostname = parsed_source_url.netloc.rstrip('/')
+ source_path = unquote(parsed_source_url.path)
+ source_query = parsed_source_url.query
+ result = ["{}://{}{}".format(source_scheme, source_hostname, quote(source_path, safe='~/'))]
+ if source_query:
+ result.append(source_query)
+ return '?'.join(result)
+
+ @classmethod
+ def from_blob_url(cls, blob_url, credential=None, snapshot=None, **kwargs):
+ # type: (str, Optional[Any], Optional[Union[str, Dict[str, Any]]], Any) -> BlobClient
+ """Create BlobClient from a blob url. This doesn't support customized blob url with '/' in blob name.
+
+ :param str blob_url:
+ The full endpoint URL to the Blob, including SAS token and snapshot if used. This could be
+ either the primary endpoint, or the secondary endpoint depending on the current `location_mode`.
+ :type blob_url: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token, or the connection string already has shared
+ access key values. The value can be a SAS token string, an account shared access
+ key, or an instance of a TokenCredentials class from azure.identity.
+ Credentials provided here will take precedence over those in the connection string.
+ :param str snapshot:
+ The optional blob snapshot on which to operate. This can be the snapshot ID string
+ or the response returned from :func:`create_snapshot`. If specified, this will override
+ the snapshot in the url.
+ :returns: A Blob client.
+ :rtype: ~azure.storage.blob.BlobClient
+ """
+ try:
+ if not blob_url.lower().startswith('http'):
+ blob_url = "https://" + blob_url
+ except AttributeError:
+ raise ValueError("Blob URL must be a string.")
+ parsed_url = urlparse(blob_url.rstrip('/'))
+
+ if not parsed_url.netloc:
+ raise ValueError("Invalid URL: {}".format(blob_url))
+
+ account_path = ""
+ if ".core." in parsed_url.netloc:
+ # .core. indicates a non-customized url; a blob name with directory info can also be parsed.
+ path_blob = parsed_url.path.lstrip('/').split('/', 1)
+ elif "localhost" in parsed_url.netloc or "127.0.0.1" in parsed_url.netloc:
+ path_blob = parsed_url.path.lstrip('/').split('/', 2)
+ account_path += path_blob[0]
+ else:
+ # for a customized url, a blob name that has directory info cannot be parsed.
+ path_blob = parsed_url.path.lstrip('/').split('/')
+ if len(path_blob) > 2:
+ account_path = "/" + "/".join(path_blob[:-2])
+ account_url = "{}://{}{}?{}".format(
+ parsed_url.scheme,
+ parsed_url.netloc.rstrip('/'),
+ account_path,
+ parsed_url.query)
+ container_name, blob_name = unquote(path_blob[-2]), unquote(path_blob[-1])
+ if not container_name or not blob_name:
+ raise ValueError("Invalid URL. Provide a blob_url with a valid blob and container name.")
+
+ path_snapshot, _ = parse_query(parsed_url.query)
+ if snapshot:
+ try:
+ path_snapshot = snapshot.snapshot # type: ignore
+ except AttributeError:
+ try:
+ path_snapshot = snapshot['snapshot'] # type: ignore
+ except TypeError:
+ path_snapshot = snapshot
+
+ return cls(
+ account_url, container_name=container_name, blob_name=blob_name,
+ snapshot=path_snapshot, credential=credential, **kwargs
+ )
+
+ @classmethod
+ def from_connection_string(
+ cls, conn_str, # type: str
+ container_name, # type: str
+ blob_name, # type: str
+ snapshot=None, # type: Optional[str]
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ): # type: (...) -> BlobClient
+ """Create BlobClient from a Connection String.
+
+ :param str conn_str:
+ A connection string to an Azure Storage account.
+ :param container_name: The container name for the blob.
+ :type container_name: str
+ :param blob_name: The name of the blob with which to interact.
+ :type blob_name: str
+ :param str snapshot:
+ The optional blob snapshot on which to operate. This can be the snapshot ID string
+ or the response returned from :func:`create_snapshot`.
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token, or the connection string already has shared
+ access key values. The value can be a SAS token string, an account shared access
+ key, or an instance of a TokenCredentials class from azure.identity.
+ Credentials provided here will take precedence over those in the connection string.
+ :returns: A Blob client.
+ :rtype: ~azure.storage.blob.BlobClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_authentication.py
+ :start-after: [START auth_from_connection_string_blob]
+ :end-before: [END auth_from_connection_string_blob]
+ :language: python
+ :dedent: 8
+ :caption: Creating the BlobClient from a connection string.
+ """
+ account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+ if 'secondary_hostname' not in kwargs:
+ kwargs['secondary_hostname'] = secondary
+ return cls(
+ account_url, container_name=container_name, blob_name=blob_name,
+ snapshot=snapshot, credential=credential, **kwargs
+ )
+
+ @distributed_trace
+ def get_account_information(self, **kwargs):
+ # type: (**Any) -> Dict[str, str]
+ """Gets information related to the storage account in which the blob resides.
+
+ The information can also be retrieved if the user has a SAS to a container or blob.
+ The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+ :returns: A dict of account information (SKU and account type).
+ :rtype: dict(str, str)
+ """
+ try:
+ return self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _upload_blob_options( # pylint:disable=too-many-statements
+ self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
+ blob_type=BlobType.BlockBlob, # type: Union[str, BlobType]
+ length=None, # type: Optional[int]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ if self.require_encryption and not self.key_encryption_key:
+ raise ValueError("Encryption required but no key was provided.")
+ encryption_options = {
+ 'required': self.require_encryption,
+ 'key': self.key_encryption_key,
+ 'resolver': self.key_resolver_function,
+ }
+ if self.key_encryption_key is not None:
+ cek, iv, encryption_data = generate_blob_encryption_data(self.key_encryption_key)
+ encryption_options['cek'] = cek
+ encryption_options['vector'] = iv
+ encryption_options['data'] = encryption_data
+
+ encoding = kwargs.pop('encoding', 'UTF-8')
+ if isinstance(data, six.text_type):
+ data = data.encode(encoding) # type: ignore
+ if length is None:
+ length = get_length(data)
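+ # For raw bytes, honor an explicit length by truncating the payload before wrapping it in a stream.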
+ if isinstance(data, bytes):
+ data = data[:length]
+
+ if isinstance(data, bytes):
+ stream = BytesIO(data)
+ elif hasattr(data, 'read'):
+ stream = data
+ elif hasattr(data, '__iter__'):
+ stream = IterStreamer(data, encoding=encoding)
+ else:
+ raise TypeError("Unsupported data type: {}".format(type(data)))
+
+ validate_content = kwargs.pop('validate_content', False)
+ content_settings = kwargs.pop('content_settings', None)
+ overwrite = kwargs.pop('overwrite', False)
+ max_concurrency = kwargs.pop('max_concurrency', 1)
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+ kwargs['cpk_info'] = cpk_info
+
+ headers = kwargs.pop('headers', {})
+ headers.update(add_metadata_headers(metadata))
+ kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None))
+ kwargs['modified_access_conditions'] = get_modify_conditions(kwargs)
+ kwargs['cpk_scope_info'] = get_cpk_scope_info(kwargs)
+ if content_settings:
+ kwargs['blob_headers'] = BlobHTTPHeaders(
+ blob_cache_control=content_settings.cache_control,
+ blob_content_type=content_settings.content_type,
+ blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+ blob_content_encoding=content_settings.content_encoding,
+ blob_content_language=content_settings.content_language,
+ blob_content_disposition=content_settings.content_disposition
+ )
+ kwargs['blob_tags_string'] = serialize_blob_tags_header(kwargs.pop('tags', None))
+ kwargs['stream'] = stream
+ kwargs['length'] = length
+ kwargs['overwrite'] = overwrite
+ kwargs['headers'] = headers
+ kwargs['validate_content'] = validate_content
+ kwargs['blob_settings'] = self._config
+ kwargs['max_concurrency'] = max_concurrency
+ kwargs['encryption_options'] = encryption_options
+ if blob_type == BlobType.BlockBlob:
+ kwargs['client'] = self._client.block_blob
+ kwargs['data'] = data
+ elif blob_type == BlobType.PageBlob:
+ kwargs['client'] = self._client.page_blob
+ elif blob_type == BlobType.AppendBlob:
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+ kwargs['client'] = self._client.append_blob
+ else:
+ raise ValueError("Unsupported BlobType: {}".format(blob_type))
+ return kwargs
+
+ @distributed_trace
+ def upload_blob( # pylint: disable=too-many-locals
+ self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
+ blob_type=BlobType.BlockBlob, # type: Union[str, BlobType]
+ length=None, # type: Optional[int]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs
+ ):
+ # type: (...) -> Any
+ """Creates a new blob from a data source with automatic chunking.
+
+ :param data: The blob data to upload.
+ :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
+ either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
+ :param int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict(str, str)
+ :keyword tags:
+ Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+ If True, upload_blob will overwrite the existing data. If set to False, the
+ operation will fail with ResourceExistsError. The exception to the above is with Append
+ blob types: if set to False and the data already exists, an error will not be raised
+ and the data will be appended to the existing blob. If set overwrite=True, then the existing
+ append blob will be deleted, and a new one created. Defaults to False.
+ :keyword ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the blob. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https, as https (the default), will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient upload algorithm
+ will not be used because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword lease:
+ Required if the blob has an active lease. If specified, upload_blob only succeeds if the
+ blob's lease is active and matches this ID. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+ :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+ A standard blob tier value to set the blob to. For this version of the library,
+ this is only applicable to block blobs on standard storage accounts.
+ :keyword int maxsize_condition:
+ Optional conditional header. The max length in bytes permitted for
+ the append blob. If the Append Block operation would cause the blob
+ to exceed that limit or if the blob size is already greater than the
+ value specified in this header, the request will fail with
+ MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+ :keyword int max_concurrency:
+ Maximum number of parallel connections to use when the blob size exceeds
+ 64MB.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str encoding:
+ Defaults to UTF-8.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :returns: Blob-updated property dict (Etag and last modified)
+ :rtype: dict[str, Any]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_hello_world.py
+ :start-after: [START upload_a_blob]
+ :end-before: [END upload_a_blob]
+ :language: python
+ :dedent: 12
+ :caption: Upload a blob to the container.
+ """
+ options = self._upload_blob_options(
+ data,
+ blob_type=blob_type,
+ length=length,
+ metadata=metadata,
+ **kwargs)
+ if blob_type == BlobType.BlockBlob:
+ return upload_block_blob(**options)
+ if blob_type == BlobType.PageBlob:
+ return upload_page_blob(**options)
+ return upload_append_blob(**options)
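+ # Hypothetical usage sketch (blob_client construction is an assumption, not shown here):
+ #   with open("./data.bin", "rb") as stream:
+ #       blob_client.upload_blob(stream, overwrite=True)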
+
+ def _download_blob_options(self, offset=None, length=None, **kwargs):
+ # type: (Optional[int], Optional[int], **Any) -> Dict[str, Any]
+ if self.require_encryption and not self.key_encryption_key:
+ raise ValueError("Encryption required but no key was provided.")
+ if length is not None and offset is None:
+ raise ValueError("Offset value must not be None if length is set.")
+ if length is not None:
+ length = offset + length - 1 # Service actually uses an end-range inclusive index
+
+ validate_content = kwargs.pop('validate_content', False)
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+
+ options = {
+ 'clients': self._client,
+ 'config': self._config,
+ 'start_range': offset,
+ 'end_range': length,
+ 'version_id': kwargs.pop('version_id', None),
+ 'validate_content': validate_content,
+ 'encryption_options': {
+ 'required': self.require_encryption,
+ 'key': self.key_encryption_key,
+ 'resolver': self.key_resolver_function},
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'cpk_info': cpk_info,
+ 'cls': kwargs.pop('cls', None) or deserialize_blob_stream,
+ 'max_concurrency': kwargs.pop('max_concurrency', 1),
+ 'encoding': kwargs.pop('encoding', None),
+ 'timeout': kwargs.pop('timeout', None),
+ 'name': self.blob_name,
+ 'container': self.container_name}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def download_blob(self, offset=None, length=None, **kwargs):
+ # type: (Optional[int], Optional[int], **Any) -> StorageStreamDownloader
+ """Downloads a blob to the StorageStreamDownloader. The readall() method must
+ be used to read all the content or readinto() must be used to download the blob into
+ a stream.
+
+ :param int offset:
+ Start of byte range to use for downloading a section of the blob.
+ Must be set if length is provided.
+ :param int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to download.
+
+ .. versionadded:: 12.4.0
+ This keyword argument was introduced in API version '2019-12-12'.
+
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the blob. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https, as https (the default), will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient upload algorithm
+ will not be used because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword lease:
+ Required if the blob has an active lease. If specified, download_blob only
+ succeeds if the blob's lease is active and matches this ID. Value can be a
+ BlobLeaseClient object or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :keyword str encoding:
+ Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :returns: A streaming object (StorageStreamDownloader)
+ :rtype: ~azure.storage.blob.StorageStreamDownloader
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_hello_world.py
+ :start-after: [START download_a_blob]
+ :end-before: [END download_a_blob]
+ :language: python
+ :dedent: 12
+ :caption: Download a blob.
+ """
+ options = self._download_blob_options(
+ offset=offset,
+ length=length,
+ **kwargs)
+ return StorageStreamDownloader(**options)
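+ # Hypothetical usage sketch (blob_client construction is an assumption, not shown here):
+ #   downloader = blob_client.download_blob(offset=0, length=1024)
+ #   first_kb = downloader.readall()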
+
+ def _quick_query_options(self, query_expression,
+ **kwargs):
+ # type: (str, **Any) -> Dict[str, Any]
+ delimiter = '\n'
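+ # The record delimiter is taken from the input/output dialect below and returned alongside
+ # the options so the query reader can split the streamed results into records.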
+ input_format = kwargs.pop('blob_format', None)
+ if input_format:
+ try:
+ delimiter = input_format.lineterminator
+ except AttributeError:
+ try:
+ delimiter = input_format.delimiter
+ except AttributeError:
+ raise ValueError("The Type of blob_format can only be DelimitedTextDialect or DelimitedJsonDialect")
+ output_format = kwargs.pop('output_format', None)
+ if output_format:
+ try:
+ delimiter = output_format.lineterminator
+ except AttributeError:
+ try:
+ delimiter = output_format.delimiter
+ except AttributeError:
+ pass
+ else:
+ output_format = input_format
+ query_request = QueryRequest(
+ expression=query_expression,
+ input_serialization=serialize_query_format(input_format),
+ output_serialization=serialize_query_format(output_format)
+ )
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(
+ encryption_key=cpk.key_value,
+ encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm
+ )
+ options = {
+ 'query_request': query_request,
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'cpk_info': cpk_info,
+ 'snapshot': self.snapshot,
+ 'timeout': kwargs.pop('timeout', None),
+ 'cls': return_headers_and_deserialized,
+ }
+ options.update(kwargs)
+ return options, delimiter
+
+ @distributed_trace
+ def query_blob(self, query_expression, **kwargs):
+ # type: (str, **Any) -> BlobQueryReader
+ """Enables users to select/project on blob/or blob snapshot data by providing simple query expressions.
+ This operations returns a BlobQueryReader, users need to use readall() or readinto() to get query data.
+
+ :param str query_expression:
+ Required. A query statement.
+ :keyword Callable[~azure.storage.blob.BlobQueryError] on_error:
+ A function to be called on any processing errors returned by the service.
+ :keyword blob_format:
+ Optional. Defines the serialization of the data currently stored in the blob. The default is to
+ treat the blob data as CSV data formatted in the default dialect. This can be overridden with
+ a custom DelimitedTextDialect, or alternatively a DelimitedJsonDialect.
+ :paramtype blob_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect
+ :keyword output_format:
+ Optional. Defines the output serialization for the data stream. By default the data will be returned
+ as it is represented in the blob. By providing an output format, the blob data will be reformatted
+ according to that profile. This value can be a DelimitedTextDialect or a DelimitedJsonDialect.
+ :paramtype output_format: ~azure.storage.blob.DelimitedTextDialect, ~azure.storage.blob.DelimitedJsonDialect
+ or list[~azure.storage.blob.ArrowDialect]
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A streaming object (BlobQueryReader)
+ :rtype: ~azure.storage.blob.BlobQueryReader
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_query.py
+ :start-after: [START query]
+ :end-before: [END query]
+ :language: python
+ :dedent: 4
+ :caption: Select/project on blob or blob snapshot data by providing simple query expressions.
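+
+ A minimal usage sketch (illustrative only; ``blob_client`` is assumed to be an instance of
+ this client):
+
+ .. code-block:: python
+
+     # run a blob query and read the filtered rows into memory
+     reader = blob_client.query_blob("SELECT * from BlobStorage")
+     filtered_data = reader.readall()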
+ """
+ errors = kwargs.pop("on_error", None)
+ error_cls = kwargs.pop("error_cls", BlobQueryError)
+ encoding = kwargs.pop("encoding", None)
+ options, delimiter = self._quick_query_options(query_expression, **kwargs)
+ try:
+ headers, raw_response_body = self._client.blob.query(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return BlobQueryReader(
+ name=self.blob_name,
+ container=self.container_name,
+ errors=errors,
+ record_delimiter=delimiter,
+ encoding=encoding,
+ headers=headers,
+ response=raw_response_body,
+ error_cls=error_cls)
+
+ @staticmethod
+ def _generic_delete_blob_options(delete_snapshots=False, **kwargs):
+ # type: (bool, **Any) -> Dict[str, Any]
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ if delete_snapshots:
+ delete_snapshots = DeleteSnapshotsOptionType(delete_snapshots)
+ options = {
+ 'timeout': kwargs.pop('timeout', None),
+ 'snapshot': kwargs.pop('snapshot', None), # this is added for delete_blobs
+ 'delete_snapshots': delete_snapshots or None,
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions}
+ options.update(kwargs)
+ return options
+
+ def _delete_blob_options(self, delete_snapshots=False, **kwargs):
+ # type: (bool, **Any) -> Dict[str, Any]
+ if self.snapshot and delete_snapshots:
+ raise ValueError("The delete_snapshots option cannot be used with a specific snapshot.")
+ options = self._generic_delete_blob_options(delete_snapshots, **kwargs)
+ options['snapshot'] = self.snapshot
+ options['version_id'] = kwargs.pop('version_id', None)
+ return options
+
+ @distributed_trace
+ def delete_blob(self, delete_snapshots=False, **kwargs):
+ # type: (str, **Any) -> None
+ """Marks the specified blob for deletion.
+
+ The blob is later deleted during garbage collection.
+ Note that in order to delete a blob, you must delete all of its
+ snapshots. You can delete both at the same time with the delete_blob()
+ operation.
+
+ If a delete retention policy is enabled for the service, then this operation soft deletes the blob
+ and retains the blob for a specified number of days.
+ After the specified number of days, the blob's data is removed from the service during garbage collection.
+ A soft-deleted blob is accessible through :func:`~ContainerClient.list_blobs()` by specifying the
+ `include=['deleted']` option. A soft-deleted blob can be restored using the :func:`undelete` operation.
+
+ :param str delete_snapshots:
+ Required if the blob has associated snapshots. Values include:
+ - "only": Deletes only the blobs snapshots.
+ - "include": Deletes the blob along with all snapshots.
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to delete.
+
+ .. versionadded:: 12.4.0
+ This keyword argument was introduced in API version '2019-12-12'.
+
+ :keyword lease:
+ Required if the blob has an active lease. If specified, delete_blob only
+ succeeds if the blob's lease is active and matches this ID. Value can be a
+ BlobLeaseClient object or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_hello_world.py
+ :start-after: [START delete_blob]
+ :end-before: [END delete_blob]
+ :language: python
+ :dedent: 12
+ :caption: Delete a blob.
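+
+ A minimal usage sketch (illustrative only; ``blob_client`` is assumed to be an instance of
+ this client):
+
+ .. code-block:: python
+
+     # delete the blob together with any snapshots it has
+     blob_client.delete_blob(delete_snapshots="include")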
+ """
+ options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs)
+ try:
+ self._client.blob.delete(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace
+ def undelete_blob(self, **kwargs):
+ # type: (**Any) -> None
+ """Restores soft-deleted blobs or snapshots.
+
+ The operation will only be successful if used within the specified number of days
+ set in the delete retention policy.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common.py
+ :start-after: [START undelete_blob]
+ :end-before: [END undelete_blob]
+ :language: python
+ :dedent: 8
+ :caption: Undeleting a blob.
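+
+ A minimal usage sketch (illustrative only; ``blob_client`` is assumed to be an instance of
+ this client):
+
+ .. code-block:: python
+
+     # restore a soft-deleted blob while the retention window is still open
+     blob_client.undelete_blob()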
+ """
+ try:
+ self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace()
+ def exists(self, **kwargs):
+ # type: (**Any) -> bool
+ """
+ Returns True if a blob exists with the defined parameters, and returns
+ False otherwise.
+
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to check if it exists.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: True if the blob exists, False otherwise.
+ :rtype: bool
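+
+ A minimal usage sketch (illustrative only; ``blob_client`` is assumed to be an instance of
+ this client):
+
+ .. code-block:: python
+
+     # check for the blob before acting on it
+     if blob_client.exists():
+         props = blob_client.get_blob_properties()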
+ """
+ try:
+ self._client.blob.get_properties(
+ snapshot=self.snapshot,
+ **kwargs)
+ return True
+ except StorageErrorException as error:
+ try:
+ process_storage_error(error)
+ except ResourceNotFoundError:
+ return False
+
+ @distributed_trace
+ def get_blob_properties(self, **kwargs):
+ # type: (**Any) -> BlobProperties
+ """Returns all user-defined metadata, standard HTTP properties, and
+ system properties for the blob. It does not return the content of the blob.
+
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to get properties for.
+
+ .. versionadded:: 12.4.0
+ This keyword argument was introduced in API version '2019-12-12'.
+
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: BlobProperties
+ :rtype: ~azure.storage.blob.BlobProperties
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common.py
+ :start-after: [START get_blob_properties]
+ :end-before: [END get_blob_properties]
+ :language: python
+ :dedent: 8
+ :caption: Getting the properties for a blob.
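+
+ A minimal usage sketch (illustrative only; ``blob_client`` is assumed to be an instance of
+ this client):
+
+ .. code-block:: python
+
+     # read the size and last-modified time from the returned BlobProperties
+     props = blob_client.get_blob_properties()
+     print(props.size, props.last_modified)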
+ """
+ # TODO: extract this out as _get_blob_properties_options
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+ try:
+ blob_props = self._client.blob.get_properties(
+ timeout=kwargs.pop('timeout', None),
+ version_id=kwargs.pop('version_id', None),
+ snapshot=self.snapshot,
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ cls=kwargs.pop('cls', None) or deserialize_blob_properties,
+ cpk_info=cpk_info,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ blob_props.name = self.blob_name
+ if isinstance(blob_props, BlobProperties):
+ blob_props.container = self.container_name
+ blob_props.snapshot = self.snapshot
+ return blob_props # type: ignore
+
+ def _set_http_headers_options(self, content_settings=None, **kwargs):
+ # type: (Optional[ContentSettings], **Any) -> Dict[str, Any]
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ blob_headers = None
+ if content_settings:
+ blob_headers = BlobHTTPHeaders(
+ blob_cache_control=content_settings.cache_control,
+ blob_content_type=content_settings.content_type,
+ blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+ blob_content_encoding=content_settings.content_encoding,
+ blob_content_language=content_settings.content_language,
+ blob_content_disposition=content_settings.content_disposition
+ )
+ options = {
+ 'timeout': kwargs.pop('timeout', None),
+ 'blob_http_headers': blob_headers,
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def set_http_headers(self, content_settings=None, **kwargs):
+ # type: (Optional[ContentSettings], **Any) -> Dict[str, Any]
+ """Sets system properties on the blob.
+
+ If one property is set for the content_settings, all properties will be overridden.
+
+ :param ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified)
+ :rtype: Dict[str, Any]
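+
+ A minimal usage sketch (illustrative only; ``blob_client`` is assumed to be an instance of this
+ client and ``ContentSettings`` to be the content settings model from this package):
+
+ .. code-block:: python
+
+     # overwrite all content settings with the values given here
+     blob_client.set_http_headers(content_settings=ContentSettings(content_type="text/plain"))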
+ """
+ options = self._set_http_headers_options(content_settings=content_settings, **kwargs)
+ try:
+ return self._client.blob.set_http_headers(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _set_blob_metadata_options(self, metadata=None, **kwargs):
+ # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+ headers = kwargs.pop('headers', {})
+ headers.update(add_metadata_headers(metadata))
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ cpk_scope_info = get_cpk_scope_info(kwargs)
+
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+ options = {
+ 'timeout': kwargs.pop('timeout', None),
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'cpk_scope_info': cpk_scope_info,
+ 'cpk_info': cpk_info,
+ 'cls': return_response_headers,
+ 'headers': headers}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def set_blob_metadata(self, metadata=None, **kwargs):
+ # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]]
+ """Sets user-defined metadata for the blob as one or more name-value pairs.
+
+ :param metadata:
+ Dict containing name and value pairs. Each call to this operation
+ replaces all existing metadata attached to the blob. To remove all
+ metadata from the blob, call this operation with no metadata headers.
+ :type metadata: dict(str, str)
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified)
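+
+ A minimal usage sketch (illustrative only; ``blob_client`` is assumed to be an instance of
+ this client):
+
+ .. code-block:: python
+
+     # replace any existing metadata with these two name-value pairs
+     blob_client.set_blob_metadata(metadata={"category": "reports", "owner": "team-a"})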
+ """
+ options = self._set_blob_metadata_options(metadata=metadata, **kwargs)
+ try:
+ return self._client.blob.set_metadata(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _create_page_blob_options( # type: ignore
+ self, size, # type: int
+ content_settings=None, # type: Optional[ContentSettings]
+ metadata=None, # type: Optional[Dict[str, str]]
+ premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+ headers = kwargs.pop('headers', {})
+ headers.update(add_metadata_headers(metadata))
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ cpk_scope_info = get_cpk_scope_info(kwargs)
+ blob_headers = None
+ if content_settings:
+ blob_headers = BlobHTTPHeaders(
+ blob_cache_control=content_settings.cache_control,
+ blob_content_type=content_settings.content_type,
+ blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+ blob_content_encoding=content_settings.content_encoding,
+ blob_content_language=content_settings.content_language,
+ blob_content_disposition=content_settings.content_disposition
+ )
+
+ sequence_number = kwargs.pop('sequence_number', None)
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+
+ if premium_page_blob_tier:
+ try:
+ headers['x-ms-access-tier'] = premium_page_blob_tier.value # type: ignore
+ except AttributeError:
+ headers['x-ms-access-tier'] = premium_page_blob_tier # type: ignore
+
+ blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None))
+
+ options = {
+ 'content_length': 0,
+ 'blob_content_length': size,
+ 'blob_sequence_number': sequence_number,
+ 'blob_http_headers': blob_headers,
+ 'timeout': kwargs.pop('timeout', None),
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'cpk_scope_info': cpk_scope_info,
+ 'cpk_info': cpk_info,
+ 'blob_tags_string': blob_tags_string,
+ 'cls': return_response_headers,
+ 'headers': headers}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def create_page_blob( # type: ignore
+ self, size, # type: int
+ content_settings=None, # type: Optional[ContentSettings]
+ metadata=None, # type: Optional[Dict[str, str]]
+ premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Creates a new Page Blob of the specified size.
+
+ :param int size:
+ This specifies the maximum size for the page blob, up to 1 TB.
+ The page blob size must be aligned to a 512-byte boundary.
+ :param ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict(str, str)
+ :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+ :keyword tags:
+ Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword int sequence_number:
+ Only for Page blobs. The sequence number is a user-controlled value that you can use to
+ track requests. The value of the sequence number must be between 0
+ and 2^63 - 1. The default value is 0.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict[str, Any]
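+
+ A minimal usage sketch (illustrative only; ``blob_client`` is assumed to be an instance of
+ this client):
+
+ .. code-block:: python
+
+     # create an empty 1 KiB page blob; the size must be a multiple of 512 bytes
+     blob_client.create_page_blob(size=1024)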
+ """
+ options = self._create_page_blob_options(
+ size,
+ content_settings=content_settings,
+ metadata=metadata,
+ premium_page_blob_tier=premium_page_blob_tier,
+ **kwargs)
+ try:
+ return self._client.page_blob.create(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _create_append_blob_options(self, content_settings=None, metadata=None, **kwargs):
+ # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+ headers = kwargs.pop('headers', {})
+ headers.update(add_metadata_headers(metadata))
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ cpk_scope_info = get_cpk_scope_info(kwargs)
+ blob_headers = None
+ if content_settings:
+ blob_headers = BlobHTTPHeaders(
+ blob_cache_control=content_settings.cache_control,
+ blob_content_type=content_settings.content_type,
+ blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+ blob_content_encoding=content_settings.content_encoding,
+ blob_content_language=content_settings.content_language,
+ blob_content_disposition=content_settings.content_disposition
+ )
+
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+ blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None))
+
+ options = {
+ 'content_length': 0,
+ 'blob_http_headers': blob_headers,
+ 'timeout': kwargs.pop('timeout', None),
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'cpk_scope_info': cpk_scope_info,
+ 'cpk_info': cpk_info,
+ 'blob_tags_string': blob_tags_string,
+ 'cls': return_response_headers,
+ 'headers': headers}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def create_append_blob(self, content_settings=None, metadata=None, **kwargs):
+ # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]]
+ """Creates a new Append Blob.
+
+ :param ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict(str, str)
+ :keyword tags:
+ Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict[str, Any]
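+
+ A minimal usage sketch (illustrative only; ``blob_client`` is assumed to be an instance of
+ this client; ``append_block`` is defined elsewhere on the client):
+
+ .. code-block:: python
+
+     # create an empty append blob and append a first block of data to it
+     blob_client.create_append_blob()
+     blob_client.append_block(b"first log line\n")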
+ """
+ options = self._create_append_blob_options(
+ content_settings=content_settings,
+ metadata=metadata,
+ **kwargs)
+ try:
+ return self._client.append_blob.create(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _create_snapshot_options(self, metadata=None, **kwargs):
+ # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+ headers = kwargs.pop('headers', {})
+ headers.update(add_metadata_headers(metadata))
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ cpk_scope_info = get_cpk_scope_info(kwargs)
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+
+ options = {
+ 'timeout': kwargs.pop('timeout', None),
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'cpk_scope_info': cpk_scope_info,
+ 'cpk_info': cpk_info,
+ 'cls': return_response_headers,
+ 'headers': headers}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def create_snapshot(self, metadata=None, **kwargs):
+ # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]]
+ """Creates a snapshot of the blob.
+
+ A snapshot is a read-only version of a blob that's taken at a point in time.
+ It can be read, copied, or deleted, but not modified. Snapshots provide a way
+ to back up a blob as it appears at a moment in time.
+
+ A snapshot of a blob has the same name as the base blob from which the snapshot
+ is taken, with a DateTime value appended to indicate the time at which the
+ snapshot was taken.
+
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict(str, str)
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on a destination blob with a matching value.
+
+ .. versionadded:: 12.4.0
+
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified).
+ :rtype: dict[str, Any]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common.py
+ :start-after: [START create_blob_snapshot]
+ :end-before: [END create_blob_snapshot]
+ :language: python
+ :dedent: 8
+ :caption: Create a snapshot of the blob.
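+
+ A minimal usage sketch (illustrative only; ``blob_client`` is assumed to be an instance of
+ this client):
+
+ .. code-block:: python
+
+     # take a snapshot and keep its identifier for later reads
+     snapshot_props = blob_client.create_snapshot()
+     snapshot_id = snapshot_props["snapshot"]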
+ """
+ options = self._create_snapshot_options(metadata=metadata, **kwargs)
+ try:
+ return self._client.blob.create_snapshot(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _start_copy_from_url_options(self, source_url, metadata=None, incremental_copy=False, **kwargs):
+ # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Any]
+ headers = kwargs.pop('headers', {})
+ headers.update(add_metadata_headers(metadata))
+ if 'source_lease' in kwargs:
+ source_lease = kwargs.pop('source_lease')
+ try:
+ headers['x-ms-source-lease-id'] = source_lease.id # type: str
+ except AttributeError:
+ headers['x-ms-source-lease-id'] = source_lease
+
+ tier = kwargs.pop('premium_page_blob_tier', None) or kwargs.pop('standard_blob_tier', None)
+
+ if kwargs.get('requires_sync'):
+ headers['x-ms-requires-sync'] = str(kwargs.pop('requires_sync'))
+
+ timeout = kwargs.pop('timeout', None)
+ dest_mod_conditions = get_modify_conditions(kwargs)
+ blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None))
+
+ options = {
+ 'copy_source': source_url,
+ 'seal_blob': kwargs.pop('seal_destination_blob', None),
+ 'timeout': timeout,
+ 'modified_access_conditions': dest_mod_conditions,
+ 'blob_tags_string': blob_tags_string,
+ 'headers': headers,
+ 'cls': return_response_headers,
+ }
+ if not incremental_copy:
+ source_mod_conditions = get_source_conditions(kwargs)
+ dest_access_conditions = get_access_conditions(kwargs.pop('destination_lease', None))
+ options['source_modified_access_conditions'] = source_mod_conditions
+ options['lease_access_conditions'] = dest_access_conditions
+ options['tier'] = tier.value if tier else None
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs):
+ # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Union[str, datetime]]
+ """Copies a blob asynchronously.
+
+ This operation returns a copy operation
+ object that can be used to wait on the completion of the operation,
+ as well as check status or abort the copy operation.
+ The Blob service copies blobs on a best-effort basis.
+
+ The source blob for a copy operation may be a block blob, an append blob,
+ or a page blob. If the destination blob already exists, it must be of the
+ same blob type as the source blob. Any existing destination blob will be
+ overwritten. The destination blob cannot be modified while a copy operation
+ is in progress.
+
+ When copying from a page blob, the Blob service creates a destination page
+ blob of the source blob's length, initially containing all zeroes. Then
+ the source page ranges are enumerated, and non-empty ranges are copied.
+
+ For a block blob or an append blob, the Blob service creates a committed
+ blob of zero length before returning from this operation. When copying
+ from a block blob, all committed blocks and their block IDs are copied.
+ Uncommitted blocks are not copied. At the end of the copy operation, the
+ destination blob will have the same committed block count as the source.
+
+ When copying from an append blob, all committed blocks are copied. At the
+ end of the copy operation, the destination blob will have the same committed
+ block count as the source.
+
+ For all blob types, you can call status() on the returned polling object
+ to check the status of the copy operation, or wait() to block until the
+ operation is complete. The final blob will be committed when the copy completes.
+
+ :param str source_url:
+ A URL of up to 2 KB in length that specifies a file or blob.
+ The value should be URL-encoded as it would appear in a request URI.
+ If the source is in another account, the source must either be public
+ or must be authenticated via a shared access signature. If the source
+ is public, no authentication is required.
+ Examples:
+ https://myaccount.blob.core.windows.net/mycontainer/myblob
+
+ https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=
+
+ https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
+ :param metadata:
+ Name-value pairs associated with the blob as metadata. If no name-value
+ pairs are specified, the operation will copy the metadata from the
+ source blob or file to the destination blob. If one or more name-value
+ pairs are specified, the destination blob is created with the specified
+ metadata, and metadata is not copied from the source blob or file.
+ :type metadata: dict(str, str)
+ :param bool incremental_copy:
+ Copies the snapshot of the source page blob to a destination page blob.
+ The snapshot is copied such that only the differential changes since
+ the previously copied snapshot are transferred to the destination.
+ The copied snapshots are complete copies of the original snapshot and
+ can be read or copied from as usual. Defaults to False.
+ :keyword tags:
+ Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this conditional header to copy the blob only if the source
+ blob has been modified since the specified date/time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this conditional header to copy the blob only if the source blob
+ has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this conditional header to copy the blob only
+ if the destination blob has been modified since the specified date/time.
+ If the destination blob has not been modified, the Blob service returns
+ status code 412 (Precondition Failed).
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this conditional header to copy the blob only
+ if the destination blob has not been modified since the specified
+ date/time. If the destination blob has been modified, the Blob service
+ returns status code 412 (Precondition Failed).
+ :keyword str etag:
+ The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The destination match condition to use upon the etag.
+ :keyword destination_lease:
+ The lease ID specified for this header must match the lease ID of the
+ destination blob. If the request does not include the lease ID or it is not
+ valid, the operation fails with status code 412 (Precondition Failed).
+ :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword source_lease:
+ Specify this to perform the Copy Blob operation only if
+ the lease ID given matches the active lease ID of the source blob.
+ :paramtype source_lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+ :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+ A standard blob tier value to set the blob to. For this version of the library,
+ this is only applicable to block blobs on standard storage accounts.
+ :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+ Indicates the priority with which to rehydrate an archived blob.
+ :keyword bool seal_destination_blob:
+ Seal the destination append blob. This operation is only for append blob.
+
+ .. versionadded:: 12.4.0
+
+ :keyword bool requires_sync:
+ Enforces that the service will not return a response until the copy is complete.
+ :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status).
+ :rtype: dict[str, str or ~datetime.datetime]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common.py
+ :start-after: [START copy_blob_from_url]
+ :end-before: [END copy_blob_from_url]
+ :language: python
+ :dedent: 12
+ :caption: Copy a blob from a URL.
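+
+ A minimal usage sketch (illustrative only; ``dest_blob_client`` and ``source_blob_url`` are
+ assumed names, not defined in this module):
+
+ .. code-block:: python
+
+     # start the copy and inspect its status from the returned properties
+     copy_props = dest_blob_client.start_copy_from_url(source_blob_url)
+     status = copy_props["copy_status"]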
+ """
+ options = self._start_copy_from_url_options(
+ source_url=self._encode_source_url(source_url),
+ metadata=metadata,
+ incremental_copy=incremental_copy,
+ **kwargs)
+ try:
+ if incremental_copy:
+ return self._client.page_blob.copy_incremental(**options)
+ return self._client.blob.start_copy_from_url(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _abort_copy_options(self, copy_id, **kwargs):
+ # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> Dict[str, Any]
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ try:
+ copy_id = copy_id.copy.id
+ except AttributeError:
+ try:
+ copy_id = copy_id['copy_id']
+ except TypeError:
+ pass
+ options = {
+ 'copy_id': copy_id,
+ 'lease_access_conditions': access_conditions,
+ 'timeout': kwargs.pop('timeout', None)}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def abort_copy(self, copy_id, **kwargs):
+ # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> None
+ """Abort an ongoing copy operation.
+
+ This will leave a destination blob with zero length and full metadata.
+ This will raise an error if the copy operation has already ended.
+
+ :param copy_id:
+ The copy operation to abort. This can be either an ID string, or an
+ instance of BlobProperties.
+ :type copy_id: str or ~azure.storage.blob.BlobProperties
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common.py
+ :start-after: [START abort_copy_blob_from_url]
+ :end-before: [END abort_copy_blob_from_url]
+ :language: python
+ :dedent: 12
+ :caption: Abort copying a blob from URL.
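+
+ A minimal usage sketch (illustrative only; ``dest_blob_client`` and ``copy_props`` are assumed
+ to come from a prior :func:`start_copy_from_url` call):
+
+ .. code-block:: python
+
+     # abort the pending copy using the id returned when it was started
+     dest_blob_client.abort_copy(copy_props["copy_id"])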
+ """
+ options = self._abort_copy_options(copy_id, **kwargs)
+ try:
+ self._client.blob.abort_copy_from_url(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace
+ def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs):
+ # type: (int, Optional[str], **Any) -> BlobLeaseClient
+ """Requests a new lease.
+
+ If the blob does not have an active lease, the Blob
+ Service creates a lease on the blob and returns a new lease.
+
+ :param int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires. A non-infinite lease can be
+ between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change. Default is -1 (infinite lease).
+ :param str lease_id:
+ Proposed lease ID, in a GUID string format. The Blob Service
+ returns 400 (Invalid request) if the proposed lease ID is not
+ in the correct format.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A BlobLeaseClient object.
+ :rtype: ~azure.storage.blob.BlobLeaseClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common.py
+ :start-after: [START acquire_lease_on_blob]
+ :end-before: [END acquire_lease_on_blob]
+ :language: python
+ :dedent: 8
+ :caption: Acquiring a lease on a blob.
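+
+ A minimal usage sketch (illustrative only; ``blob_client`` is assumed to be an instance of
+ this client):
+
+ .. code-block:: python
+
+     # take a 15-second lease and pass it to a subsequent operation on the blob
+     lease = blob_client.acquire_lease(lease_duration=15)
+     blob_client.delete_blob(lease=lease)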
+ """
+ lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore
+ lease.acquire(lease_duration=lease_duration, **kwargs)
+ return lease
+
+ @distributed_trace
+ def set_standard_blob_tier(self, standard_blob_tier, **kwargs):
+ # type: (Union[str, StandardBlobTier], Any) -> None
+ """This operation sets the tier on a block blob.
+
+ A block blob's tier determines Hot/Cool/Archive storage type.
+ This operation does not update the blob's ETag.
+
+ :param standard_blob_tier:
+ Indicates the tier to be set on the blob. Options include 'Hot', 'Cool',
+ 'Archive'. The hot tier is optimized for storing data that is accessed
+ frequently. The cool storage tier is optimized for storing data that
+ is infrequently accessed and stored for at least a month. The archive
+ tier is optimized for storing data that is rarely accessed and stored
+ for at least six months with flexible latency requirements.
+ :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
+ :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+ Indicates the priority with which to rehydrate an archived blob.
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to set the tier on.
+
+ .. versionadded:: 12.4.0
+ This keyword argument was introduced in API version '2019-12-12'.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :rtype: None
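+
+ A minimal usage sketch (illustrative only; ``blob_client`` is assumed to be an instance of
+ this client):
+
+ .. code-block:: python
+
+     # move a block blob to the cool access tier
+     blob_client.set_standard_blob_tier("Cool")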
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ if standard_blob_tier is None:
+ raise ValueError("A StandardBlobTier must be specified")
+ if self.snapshot and kwargs.get('version_id'):
+ raise ValueError("Snapshot and version_id cannot be set at the same time")
+ try:
+ self._client.blob.set_tier(
+ tier=standard_blob_tier,
+ snapshot=self.snapshot,
+ timeout=kwargs.pop('timeout', None),
+ modified_access_conditions=mod_conditions,
+ lease_access_conditions=access_conditions,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _stage_block_options(
+ self, block_id, # type: str
+ data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
+ length=None, # type: Optional[int]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+ block_id = encode_base64(str(block_id))
+ if isinstance(data, six.text_type):
+ data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ if length is None:
+ length = get_length(data)
+ if length is None:
+ length, data = read_length(data)
+ if isinstance(data, bytes):
+ data = data[:length]
+
+ validate_content = kwargs.pop('validate_content', False)
+ cpk_scope_info = get_cpk_scope_info(kwargs)
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+
+ options = {
+ 'block_id': block_id,
+ 'content_length': length,
+ 'body': data,
+ 'transactional_content_md5': None,
+ 'timeout': kwargs.pop('timeout', None),
+ 'lease_access_conditions': access_conditions,
+ 'validate_content': validate_content,
+ 'cpk_scope_info': cpk_scope_info,
+ 'cpk_info': cpk_info,
+ 'cls': return_response_headers,
+ }
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def stage_block(
+ self, block_id, # type: str
+ data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
+ length=None, # type: Optional[int]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ """Creates a new block to be committed as part of a blob.
+
+ :param str block_id: A string value that identifies the block.
+ The string should be less than or equal to 64 bytes in size.
+ For a given blob, the block_id must be the same size for each block.
+ :param data: The blob data.
+ :param int length: Size of the block.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the blob. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https, as https (the default) will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient upload algorithm
+ will not be used because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword str encoding:
+ Defaults to UTF-8.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob property dict.
+ :rtype: dict[str, Any]
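+
+ A minimal usage sketch (illustrative; ``blob_client`` is assumed to be an
+ existing client instance for a block blob, and the block ids and data are
+ placeholders)::
+
+     blob_client.stage_block(block_id="block-001", data=b"first block")
+     blob_client.stage_block(block_id="block-002", data=b"second block")
+     # staged blocks remain uncommitted until commit_block_list is called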
+ """
+ options = self._stage_block_options(
+ block_id,
+ data,
+ length=length,
+ **kwargs)
+ try:
+ return self._client.block_blob.stage_block(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _stage_block_from_url_options(
+ self, block_id, # type: str
+ source_url, # type: str
+ source_offset=None, # type: Optional[int]
+ source_length=None, # type: Optional[int]
+ source_content_md5=None, # type: Optional[Union[bytes, bytearray]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ if source_length is not None and source_offset is None:
+ raise ValueError("Source offset value must not be None if length is set.")
+ if source_length is not None:
+ source_length = source_offset + source_length - 1
+ block_id = encode_base64(str(block_id))
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ range_header = None
+ if source_offset is not None:
+ range_header, _ = validate_and_format_range_headers(source_offset, source_length)
+
+ cpk_scope_info = get_cpk_scope_info(kwargs)
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+ options = {
+ 'block_id': block_id,
+ 'content_length': 0,
+ 'source_url': source_url,
+ 'source_range': range_header,
+ 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None,
+ 'timeout': kwargs.pop('timeout', None),
+ 'lease_access_conditions': access_conditions,
+ 'cpk_scope_info': cpk_scope_info,
+ 'cpk_info': cpk_info,
+ 'cls': return_response_headers,
+ }
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def stage_block_from_url(
+ self, block_id, # type: str
+ source_url, # type: str
+ source_offset=None, # type: Optional[int]
+ source_length=None, # type: Optional[int]
+ source_content_md5=None, # type: Optional[Union[bytes, bytearray]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ """Creates a new block to be committed as part of a blob where
+ the contents are read from a URL.
+
+ :param str block_id: A string value that identifies the block.
+ The string should be less than or equal to 64 bytes in size.
+ For a given blob, the block_id must be the same size for each block.
+ :param str source_url: The URL of the source data from which the block contents are read.
+ :param int source_offset:
+ Start of byte range to use for the block.
+ Must be set if source length is provided.
+ :param int source_length: The size of the block in bytes.
+ :param bytearray source_content_md5:
+ Specify the md5 calculated for the range of
+ bytes that must be read from the copy source.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob property dict.
+ :rtype: dict[str, Any]
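+
+ A minimal usage sketch (illustrative; ``blob_client`` and the SAS-authenticated
+ source URL are placeholders)::
+
+     blob_client.stage_block_from_url(
+         block_id="block-001",
+         source_url="https://<account>.blob.core.windows.net/<container>/<source-blob>?<sas>",
+         source_offset=0,
+         source_length=512)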
+ """
+ options = self._stage_block_from_url_options(
+ block_id,
+ source_url=self._encode_source_url(source_url),
+ source_offset=source_offset,
+ source_length=source_length,
+ source_content_md5=source_content_md5,
+ **kwargs)
+ try:
+ return self._client.block_blob.stage_block_from_url(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _get_block_list_result(self, blocks):
+ # type: (BlockList) -> Tuple[List[BlobBlock], List[BlobBlock]]
+ committed = [] # type: List
+ uncommitted = [] # type: List
+ if blocks.committed_blocks:
+ committed = [BlobBlock._from_generated(b) for b in blocks.committed_blocks] # pylint: disable=protected-access
+ if blocks.uncommitted_blocks:
+ uncommitted = [BlobBlock._from_generated(b) for b in blocks.uncommitted_blocks] # pylint: disable=protected-access
+ return committed, uncommitted
+
+ @distributed_trace
+ def get_block_list(self, block_list_type="committed", **kwargs):
+ # type: (Optional[str], **Any) -> Tuple[List[BlobBlock], List[BlobBlock]]
+ """The Get Block List operation retrieves the list of blocks that have
+ been uploaded as part of a block blob.
+
+ :param str block_list_type:
+ Specifies whether to return the list of committed
+ blocks, the list of uncommitted blocks, or both lists together.
+ Possible values include: 'committed', 'uncommitted', 'all'
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on a blob with a matching value.
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A tuple of two lists - committed and uncommitted blocks
+ :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock))
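+
+ A minimal usage sketch (illustrative; ``blob_client`` is assumed to be an
+ existing client instance for a block blob)::
+
+     committed, uncommitted = blob_client.get_block_list(block_list_type="all")
+     committed_ids = [block.id for block in committed]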
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ blocks = self._client.block_blob.get_block_list(
+ list_type=block_list_type,
+ snapshot=self.snapshot,
+ timeout=kwargs.pop('timeout', None),
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return self._get_block_list_result(blocks)
+
+ def _commit_block_list_options( # type: ignore
+ self, block_list, # type: List[BlobBlock]
+ content_settings=None, # type: Optional[ContentSettings]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+ block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[])
+ for block in block_list:
+ try:
+ if block.state.value == 'committed':
+ block_lookup.committed.append(encode_base64(str(block.id)))
+ elif block.state.value == 'uncommitted':
+ block_lookup.uncommitted.append(encode_base64(str(block.id)))
+ else:
+ block_lookup.latest.append(encode_base64(str(block.id)))
+ except AttributeError:
+ block_lookup.latest.append(encode_base64(str(block)))
+ headers = kwargs.pop('headers', {})
+ headers.update(add_metadata_headers(metadata))
+ blob_headers = None
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ if content_settings:
+ blob_headers = BlobHTTPHeaders(
+ blob_cache_control=content_settings.cache_control,
+ blob_content_type=content_settings.content_type,
+ blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+ blob_content_encoding=content_settings.content_encoding,
+ blob_content_language=content_settings.content_language,
+ blob_content_disposition=content_settings.content_disposition
+ )
+
+ validate_content = kwargs.pop('validate_content', False)
+ cpk_scope_info = get_cpk_scope_info(kwargs)
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+
+ tier = kwargs.pop('standard_blob_tier', None)
+ blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None))
+
+ options = {
+ 'blocks': block_lookup,
+ 'blob_http_headers': blob_headers,
+ 'lease_access_conditions': access_conditions,
+ 'timeout': kwargs.pop('timeout', None),
+ 'modified_access_conditions': mod_conditions,
+ 'cls': return_response_headers,
+ 'validate_content': validate_content,
+ 'cpk_scope_info': cpk_scope_info,
+ 'cpk_info': cpk_info,
+ 'tier': tier.value if tier else None,
+ 'blob_tags_string': blob_tags_string,
+ 'headers': headers
+ }
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def commit_block_list( # type: ignore
+ self, block_list, # type: List[BlobBlock]
+ content_settings=None, # type: Optional[ContentSettings]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """The Commit Block List operation writes a blob by specifying the list of
+ block IDs that make up the blob.
+
+ :param list block_list:
+ List of :class:`~azure.storage.blob.BlobBlock` objects identifying the blocks to commit.
+ :param ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict[str, str]
+ :keyword tags:
+ Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the block list content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https, as https (the default),
+ will already validate. Note that this MD5 hash is not stored with the
+ blob.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on a blob with a matching value.
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+ A standard blob tier value to set the blob to. For this version of the library,
+ this is only applicable to block blobs on standard storage accounts.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
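+
+ A minimal usage sketch (illustrative; ``blob_client`` is assumed to be an
+ existing client instance, and blocks "block-001" and "block-002" are assumed to
+ have been staged earlier with ``stage_block``)::
+
+     from azure.storage.blob import BlobBlock
+
+     blob_client.commit_block_list(
+         [BlobBlock(block_id="block-001"), BlobBlock(block_id="block-002")])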
+ """
+ options = self._commit_block_list_options(
+ block_list,
+ content_settings=content_settings,
+ metadata=metadata,
+ **kwargs)
+ try:
+ return self._client.block_blob.commit_block_list(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace
+ def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs):
+ # type: (Union[str, PremiumPageBlobTier], **Any) -> None
+ """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts.
+
+ :param premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+ :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :rtype: None
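+
+ A minimal usage sketch (illustrative; ``blob_client`` is assumed to reference a
+ page blob in a premium storage account)::
+
+     from azure.storage.blob import PremiumPageBlobTier
+
+     blob_client.set_premium_page_blob_tier(PremiumPageBlobTier.P10)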
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ if premium_page_blob_tier is None:
+ raise ValueError("A PremiumPageBlobTier must be specified")
+ try:
+ self._client.blob.set_tier(
+ tier=premium_page_blob_tier,
+ timeout=kwargs.pop('timeout', None),
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _set_blob_tags_options(self, tags=None, **kwargs):
+ # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+ tags = serialize_blob_tags(tags)
+ mod_conditions = get_modify_conditions(kwargs)
+
+ options = {
+ 'tags': tags,
+ 'modified_access_conditions': mod_conditions,
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def set_blob_tags(self, tags=None, **kwargs):
+ # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+ """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot.
+ Each call to this operation replaces all existing tags attached to the blob. To remove all
+ tags from the blob, call this operation with no tags set.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2019-12-12'.
+
+ :param tags:
+ Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+ :type tags: dict(str, str)
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to add tags to.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the tags content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https, as https (the default),
+ will already validate. Note that this MD5 hash is not stored with the
+ blob.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on a blob with a matching value.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified)
+ :rtype: Dict[str, Any]
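+
+ A minimal usage sketch (illustrative; ``blob_client`` is assumed to be an
+ existing client instance and the tag values are placeholders)::
+
+     blob_client.set_blob_tags({"project": "demo", "status": "active"})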
+ """
+ options = self._set_blob_tags_options(tags=tags, **kwargs)
+ try:
+ return self._client.blob.set_tags(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _get_blob_tags_options(self, **kwargs):
+ # type: (**Any) -> Dict[str, str]
+ mod_conditions = get_modify_conditions(kwargs)
+
+ options = {
+ 'version_id': kwargs.pop('version_id', None),
+ 'snapshot': self.snapshot,
+ 'modified_access_conditions': mod_conditions,
+ 'timeout': kwargs.pop('timeout', None),
+ 'cls': return_headers_and_deserialized}
+ return options
+
+ @distributed_trace
+ def get_blob_tags(self, **kwargs):
+ # type: (**Any) -> Dict[str, str]
+ """The Get Tags operation enables users to get tags on a blob or specific blob version, or snapshot.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2019-12-12'.
+
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to retrieve tags from.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on a blob with a matching value.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Key value pairs of blob tags.
+ :rtype: Dict[str, str]
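+
+ A minimal usage sketch (illustrative; ``blob_client`` is assumed to be an
+ existing client instance)::
+
+     tags = blob_client.get_blob_tags()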
+ """
+ options = self._get_blob_tags_options(**kwargs)
+ try:
+ _, tags = self._client.blob.get_tags(**options)
+ return parse_tags(tags) # pylint: disable=protected-access
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _get_page_ranges_options( # type: ignore
+ self, offset=None, # type: Optional[int]
+ length=None, # type: Optional[int]
+ previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ if length is not None and offset is None:
+ raise ValueError("Offset value must not be None if length is set.")
+ if length is not None:
+ length = offset + length - 1 # Reformat to an inclusive range index
+ page_range, _ = validate_and_format_range_headers(
+ offset, length, start_range_required=False, end_range_required=False, align_to_page=True
+ )
+ options = {
+ 'snapshot': self.snapshot,
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'timeout': kwargs.pop('timeout', None),
+ 'range': page_range}
+ if previous_snapshot_diff:
+ try:
+ options['prevsnapshot'] = previous_snapshot_diff.snapshot # type: ignore
+ except AttributeError:
+ try:
+ options['prevsnapshot'] = previous_snapshot_diff['snapshot'] # type: ignore
+ except TypeError:
+ options['prevsnapshot'] = previous_snapshot_diff
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def get_page_ranges( # type: ignore
+ self, offset=None, # type: Optional[int]
+ length=None, # type: Optional[int]
+ previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]]
+ **kwargs
+ ):
+ # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+ """Returns the list of valid page ranges for a Page Blob or snapshot
+ of a page blob.
+
+ :param int offset:
+ Start of byte range to use for getting valid page ranges.
+ If no length is given, all bytes after the offset will be searched.
+ Pages must be aligned with 512-byte boundaries; the start offset
+ must be a multiple of 512 and the length must be a multiple of
+ 512.
+ :param int length:
+ Number of bytes to use for getting valid page ranges.
+ If length is given, offset must be provided.
+ This range will return valid page ranges from the offset start up to
+ the specified length.
+ Pages must be aligned with 512-byte boundaries; the start offset
+ must be a multiple of 512 and the length must be a multiple of
+ 512.
+ :param str previous_snapshot_diff:
+ The snapshot diff parameter that contains an opaque DateTime value that
+ specifies a previous blob snapshot to be compared
+ against a more recent snapshot or the current blob.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns:
+ A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+ The first element is the list of filled page ranges, the second element is the list of cleared page ranges.
+ :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
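+
+ A minimal usage sketch (illustrative; ``blob_client`` is assumed to reference an
+ existing page blob)::
+
+     filled, cleared = blob_client.get_page_ranges(offset=0, length=1024)
+     # each entry is a dict such as {'start': 0, 'end': 511}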
+ """
+ options = self._get_page_ranges_options(
+ offset=offset,
+ length=length,
+ previous_snapshot_diff=previous_snapshot_diff,
+ **kwargs)
+ try:
+ if previous_snapshot_diff:
+ ranges = self._client.page_blob.get_page_ranges_diff(**options)
+ else:
+ ranges = self._client.page_blob.get_page_ranges(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return get_page_ranges_result(ranges)
+
+ @distributed_trace
+ def get_page_range_diff_for_managed_disk(
+ self, previous_snapshot_url, # type: str
+ offset=None, # type: Optional[int]
+ length=None, # type: Optional[int]
+ **kwargs
+ ):
+ # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+ """Returns the list of valid page ranges for a managed disk or snapshot.
+
+ .. note::
+ This operation is only available for managed disk accounts.
+
+ .. versionadded:: 12.2.0
+ This operation was introduced in API version '2019-07-07'.
+
+ :param str previous_snapshot_url:
+ Specifies the URL of a previous snapshot of the managed disk.
+ The response will only contain pages that were changed between the target blob and
+ its previous snapshot.
+ :param int offset:
+ Start of byte range to use for getting valid page ranges.
+ If no length is given, all bytes after the offset will be searched.
+ Pages must be aligned with 512-byte boundaries; the start offset
+ must be a multiple of 512 and the length must be a multiple of
+ 512.
+ :param int length:
+ Number of bytes to use for getting valid page ranges.
+ If length is given, offset must be provided.
+ This range will return valid page ranges from the offset start up to
+ the specified length.
+ Pages must be aligned with 512-byte boundaries; the start offset
+ must be a multiple of 512 and the length must be a multiple of
+ 512.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns:
+ A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+ The first element is the list of filled page ranges, the second element is the list of cleared page ranges.
+ :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+ """
+ options = self._get_page_ranges_options(
+ offset=offset,
+ length=length,
+ prev_snapshot_url=previous_snapshot_url,
+ **kwargs)
+ try:
+ ranges = self._client.page_blob.get_page_ranges_diff(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return get_page_ranges_result(ranges)
+
+ def _set_sequence_number_options(self, sequence_number_action, sequence_number=None, **kwargs):
+ # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Any]
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ if sequence_number_action is None:
+ raise ValueError("A sequence number action must be specified")
+ options = {
+ 'sequence_number_action': sequence_number_action,
+ 'timeout': kwargs.pop('timeout', None),
+ 'blob_sequence_number': sequence_number,
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def set_sequence_number(self, sequence_number_action, sequence_number=None, **kwargs):
+ # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Union[str, datetime]]
+ """Sets the blob sequence number.
+
+ :param str sequence_number_action:
+ This property indicates how the service should modify the blob's sequence
+ number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
+ :param str sequence_number:
+ This property sets the blob's sequence number. The sequence number is a
+ user-controlled property that you can use to track requests and manage
+ concurrency issues.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
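+
+ A minimal usage sketch (illustrative; ``blob_client`` is assumed to reference an
+ existing page blob)::
+
+     from azure.storage.blob import SequenceNumberAction
+
+     blob_client.set_sequence_number(SequenceNumberAction.Update, sequence_number="7")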
+ """
+ options = self._set_sequence_number_options(
+ sequence_number_action, sequence_number=sequence_number, **kwargs)
+ try:
+ return self._client.page_blob.update_sequence_number(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _resize_blob_options(self, size, **kwargs):
+ # type: (int, **Any) -> Dict[str, Any]
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ if size is None:
+ raise ValueError("A content length must be specified for a Page Blob.")
+
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+ options = {
+ 'blob_content_length': size,
+ 'timeout': kwargs.pop('timeout', None),
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'cpk_info': cpk_info,
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def resize_blob(self, size, **kwargs):
+ # type: (int, **Any) -> Dict[str, Union[str, datetime]]
+ """Resizes a page blob to the specified size.
+
+ If the specified value is less than the current size of the blob,
+ then all pages above the specified value are cleared.
+
+ :param int size:
+ Size used to resize blob. Maximum size for a page blob is up to 1 TB.
+ The page blob size must be aligned to a 512-byte boundary.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
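+
+ A minimal usage sketch (illustrative; ``blob_client`` is assumed to reference an
+ existing page blob)::
+
+     blob_client.resize_blob(1024)  # the new size must be a multiple of 512 bytes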
+ """
+ options = self._resize_blob_options(size, **kwargs)
+ try:
+ return self._client.page_blob.resize(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _upload_page_options( # type: ignore
+ self, page, # type: bytes
+ offset, # type: int
+ length, # type: int
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ if isinstance(page, six.text_type):
+ page = page.encode(kwargs.pop('encoding', 'UTF-8'))
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+
+ if offset is None or offset % 512 != 0:
+ raise ValueError("offset must be an integer that aligns with 512 page size")
+ if length is None or length % 512 != 0:
+ raise ValueError("length must be an integer that aligns with 512 page size")
+ end_range = offset + length - 1 # Reformat to an inclusive range index
+ content_range = 'bytes={0}-{1}'.format(offset, end_range) # type: ignore
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ seq_conditions = SequenceNumberAccessConditions(
+ if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None),
+ if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None),
+ if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None)
+ )
+ mod_conditions = get_modify_conditions(kwargs)
+ cpk_scope_info = get_cpk_scope_info(kwargs)
+ validate_content = kwargs.pop('validate_content', False)
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+ options = {
+ 'body': page[:length],
+ 'content_length': length,
+ 'transactional_content_md5': None,
+ 'timeout': kwargs.pop('timeout', None),
+ 'range': content_range,
+ 'lease_access_conditions': access_conditions,
+ 'sequence_number_access_conditions': seq_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'validate_content': validate_content,
+ 'cpk_scope_info': cpk_scope_info,
+ 'cpk_info': cpk_info,
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def upload_page( # type: ignore
+ self, page, # type: bytes
+ offset, # type: int
+ length, # type: int
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """The Upload Pages operation writes a range of pages to a page blob.
+
+ :param bytes page:
+ Content of the page.
+ :param int offset:
+ Start of byte range to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries; the start offset
+ must be a multiple of 512 and the length must be a multiple of
+ 512.
+ :param int length:
+ Number of bytes to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries; the start offset
+ must be a multiple of 512 and the length must be a multiple of
+ 512.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the page content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https, as https (the default),
+ will already validate. Note that this MD5 hash is not stored with the
+ blob.
+ :keyword int if_sequence_number_lte:
+ If the blob's sequence number is less than or equal to
+ the specified value, the request proceeds; otherwise it fails.
+ :keyword int if_sequence_number_lt:
+ If the blob's sequence number is less than the specified
+ value, the request proceeds; otherwise it fails.
+ :keyword int if_sequence_number_eq:
+ If the blob's sequence number is equal to the specified
+ value, the request proceeds; otherwise it fails.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str encoding:
+ Defaults to UTF-8.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
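+
+ A minimal usage sketch (illustrative; ``blob_client`` is assumed to reference an
+ existing page blob and the data below is a placeholder)::
+
+     data = b"\x00" * 512
+     blob_client.upload_page(data, offset=0, length=512)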
+ """
+ options = self._upload_page_options(
+ page=page,
+ offset=offset,
+ length=length,
+ **kwargs)
+ try:
+ return self._client.page_blob.upload_pages(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _upload_pages_from_url_options( # type: ignore
+ self, source_url, # type: str
+ offset, # type: int
+ length, # type: int
+ source_offset, # type: int
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+
+ # TODO: extract the code to a method format_range
+ if offset is None or offset % 512 != 0:
+ raise ValueError("offset must be an integer that aligns with 512 page size")
+ if length is None or length % 512 != 0:
+ raise ValueError("length must be an integer that aligns with 512 page size")
+ if source_offset is None or source_offset % 512 != 0:
+ raise ValueError("source_offset must be an integer that aligns with 512 page size")
+
+ # Format range
+ end_range = offset + length - 1
+ destination_range = 'bytes={0}-{1}'.format(offset, end_range)
+ source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1)  # inclusive end of the source range
+
+ seq_conditions = SequenceNumberAccessConditions(
+ if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None),
+ if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None),
+ if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None)
+ )
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ source_mod_conditions = get_source_conditions(kwargs)
+ cpk_scope_info = get_cpk_scope_info(kwargs)
+ source_content_md5 = kwargs.pop('source_content_md5', None)
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+
+ options = {
+ 'source_url': source_url,
+ 'content_length': 0,
+ 'source_range': source_range,
+ 'range': destination_range,
+ 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None,
+ 'timeout': kwargs.pop('timeout', None),
+ 'lease_access_conditions': access_conditions,
+ 'sequence_number_access_conditions': seq_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'source_modified_access_conditions': source_mod_conditions,
+ 'cpk_scope_info': cpk_scope_info,
+ 'cpk_info': cpk_info,
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def upload_pages_from_url(self, source_url, # type: str
+ offset, # type: int
+ length, # type: int
+ source_offset, # type: int
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ """
+ The Upload Pages operation writes a range of pages to a page blob where
+ the contents are read from a URL.
+
+ :param str source_url:
+ The URL of the source data. It can point to any Azure Blob or File that is either public or has a
+ shared access signature attached.
+ :param int offset:
+ Start of byte range to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries; the start offset
+ must be a multiple of 512 and the length must be a multiple of
+ 512.
+ :param int length:
+ Number of bytes to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries; the start offset
+ must be a multiple of 512 and the length must be a multiple of
+ 512.
+ :param int source_offset:
+ This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+ The service will read the same number of bytes as the destination range (``length`` bytes).
+ :keyword bytes source_content_md5:
+ If given, the service will calculate the MD5 hash of the page content read from the copy source and compare against this value.
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the source resource has been modified since the specified time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the source resource has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword int if_sequence_number_lte:
+ If the blob's sequence number is less than or equal to
+ the specified value, the request proceeds; otherwise it fails.
+ :keyword int if_sequence_number_lt:
+ If the blob's sequence number is less than the specified
+ value, the request proceeds; otherwise it fails.
+ :keyword int if_sequence_number_eq:
+ If the blob's sequence number is equal to the specified
+ value, the request proceeds; otherwise it fails.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The destination match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
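+
+ A minimal usage sketch (illustrative; ``blob_client`` and the SAS-authenticated
+ source URL are placeholders)::
+
+     blob_client.upload_pages_from_url(
+         source_url="https://<account>.blob.core.windows.net/<container>/<source-blob>?<sas>",
+         offset=0,
+         length=512,
+         source_offset=0)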
+ """
+ options = self._upload_pages_from_url_options(
+ source_url=self._encode_source_url(source_url),
+ offset=offset,
+ length=length,
+ source_offset=source_offset,
+ **kwargs
+ )
+ try:
+ return self._client.page_blob.upload_pages_from_url(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _clear_page_options(self, offset, length, **kwargs):
+ # type: (int, int, **Any) -> Dict[str, Any]
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ seq_conditions = SequenceNumberAccessConditions(
+ if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None),
+ if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None),
+ if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None)
+ )
+ mod_conditions = get_modify_conditions(kwargs)
+ if offset is None or offset % 512 != 0:
+ raise ValueError("offset must be an integer that aligns with 512 page size")
+ if length is None or length % 512 != 0:
+ raise ValueError("length must be an integer that aligns with 512 page size")
+ end_range = length + offset - 1 # Reformat to an inclusive range index
+ content_range = 'bytes={0}-{1}'.format(offset, end_range)
+
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+
+ options = {
+ 'content_length': 0,
+ 'timeout': kwargs.pop('timeout', None),
+ 'range': content_range,
+ 'lease_access_conditions': access_conditions,
+ 'sequence_number_access_conditions': seq_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'cpk_info': cpk_info,
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def clear_page(self, offset, length, **kwargs):
+ # type: (int, int, **Any) -> Dict[str, Union[str, datetime]]
+ """Clears a range of pages.
+
+ :param int offset:
+ Start of byte range to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries; the start offset
+ must be a multiple of 512 and the length must be a multiple of
+ 512.
+ :param int length:
+ Number of bytes to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries; the start offset
+ must be a multiple of 512 and the length must be a multiple of
+ 512.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword int if_sequence_number_lte:
+ If the blob's sequence number is less than or equal to
+ the specified value, the request proceeds; otherwise it fails.
+ :keyword int if_sequence_number_lt:
+ If the blob's sequence number is less than the specified
+ value, the request proceeds; otherwise it fails.
+ :keyword int if_sequence_number_eq:
+ If the blob's sequence number is equal to the specified
+ value, the request proceeds; otherwise it fails.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
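+
+ A minimal usage sketch (illustrative; ``blob_client`` is assumed to reference an
+ existing page blob)::
+
+     blob_client.clear_page(offset=0, length=512)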
+ """
+ options = self._clear_page_options(offset, length, **kwargs)
+ try:
+ return self._client.page_blob.clear_pages(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _append_block_options( # type: ignore
+ self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
+ length=None, # type: Optional[int]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+
+ if isinstance(data, six.text_type):
+ data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore
+ if length is None:
+ length = get_length(data)
+ if length is None:
+ length, data = read_length(data)
+ if length == 0:
+ return {}
+ if isinstance(data, bytes):
+ data = data[:length]
+
+ appendpos_condition = kwargs.pop('appendpos_condition', None)
+ maxsize_condition = kwargs.pop('maxsize_condition', None)
+ validate_content = kwargs.pop('validate_content', False)
+ append_conditions = None
+ if maxsize_condition is not None or appendpos_condition is not None:
+ append_conditions = AppendPositionAccessConditions(
+ max_size=maxsize_condition,
+ append_position=appendpos_condition
+ )
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ cpk_scope_info = get_cpk_scope_info(kwargs)
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+ options = {
+ 'body': data,
+ 'content_length': length,
+ 'timeout': kwargs.pop('timeout', None),
+ 'transactional_content_md5': None,
+ 'lease_access_conditions': access_conditions,
+ 'append_position_access_conditions': append_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'validate_content': validate_content,
+ 'cpk_scope_info': cpk_scope_info,
+ 'cpk_info': cpk_info,
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def append_block( # type: ignore
+ self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
+ length=None, # type: Optional[int]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime, int]]
+ """Commits a new block of data to the end of the existing append blob.
+
+ :param data:
+ Content of the block. This can be bytes, text, an iterable or a file-like object.
+ :type data: bytes or str or Iterable
+ :param int length:
+ Size of the block in bytes.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the block content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https, as https (the default),
+ will already validate. Note that this MD5 hash is not stored with the
+ blob.
+ :keyword int maxsize_condition:
+ Optional conditional header. The max length in bytes permitted for
+ the append blob. If the Append Block operation would cause the blob
+ to exceed that limit or if the blob size is already greater than the
+ value specified in this header, the request will fail with
+ MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+ :keyword int appendpos_condition:
+ Optional conditional header, used only for the Append Block operation.
+ A number indicating the byte offset to compare. Append Block will
+ succeed only if the append position is equal to this number. If it
+ is not, the request will fail with the AppendPositionConditionNotMet error
+ (HTTP status code 412 - Precondition Failed).
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword str encoding:
+ Defaults to UTF-8.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+ :rtype: dict(str, Any)
+ """
+ options = self._append_block_options(
+ data,
+ length=length,
+ **kwargs
+ )
+ try:
+ return self._client.append_blob.append_block(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
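+ # Hedged usage sketch (illustrative, not part of the vendored source): `blob_client`
+ # is assumed to be a BlobClient for an append blob; the connection string and the
+ # container/blob names are hypothetical.
+ #
+ #     blob_client = BlobClient.from_connection_string(conn_str, "logs", "app.log")
+ #     blob_client.create_append_blob()          # creates (or resets) the append blob
+ #     result = blob_client.append_block(b"first line\n")
+ #     print(result.get("blob_committed_block_count"))
+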
+ def _append_block_from_url_options( # type: ignore
+ self, copy_source_url, # type: str
+ source_offset=None, # type: Optional[int]
+ source_length=None, # type: Optional[int]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+
+ # If source_length is provided, source_offset must also be provided
+ if source_length is not None and source_offset is None:
+ raise ValueError("source_offset should also be specified if source_length is specified")
+ # Format based on whether length is present
+ source_range = None
+ if source_length is not None:
+ end_range = source_offset + source_length - 1
+ source_range = 'bytes={0}-{1}'.format(source_offset, end_range)
+ elif source_offset is not None:
+ source_range = "bytes={0}-".format(source_offset)
+
+ appendpos_condition = kwargs.pop('appendpos_condition', None)
+ maxsize_condition = kwargs.pop('maxsize_condition', None)
+ source_content_md5 = kwargs.pop('source_content_md5', None)
+ append_conditions = None
+ if maxsize_condition or appendpos_condition is not None:
+ append_conditions = AppendPositionAccessConditions(
+ max_size=maxsize_condition,
+ append_position=appendpos_condition
+ )
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ source_mod_conditions = get_source_conditions(kwargs)
+ cpk_scope_info = get_cpk_scope_info(kwargs)
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+
+ options = {
+ 'source_url': copy_source_url,
+ 'content_length': 0,
+ 'source_range': source_range,
+ 'source_content_md5': source_content_md5,
+ 'transactional_content_md5': None,
+ 'lease_access_conditions': access_conditions,
+ 'append_position_access_conditions': append_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'source_modified_access_conditions': source_mod_conditions,
+ 'cpk_scope_info': cpk_scope_info,
+ 'cpk_info': cpk_info,
+ 'cls': return_response_headers,
+ 'timeout': kwargs.pop('timeout', None)}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def append_block_from_url(self, copy_source_url, # type: str
+ source_offset=None, # type: Optional[int]
+ source_length=None, # type: Optional[int]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime, int]]
+ """
+ Creates a new block to be committed as part of a blob, where the contents are read from a source url.
+
+ :param str copy_source_url:
+ The URL of the source data. It can point to any Azure Blob or File that is either public or has a
+ shared access signature attached.
+ :param int source_offset:
+ This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+ :param int source_length:
+ This indicates the number of bytes (starting at source_offset) to take from the copy source.
+ :keyword bytearray source_content_md5:
+ If given, the service will calculate the MD5 hash of the block content and compare against this value.
+ :keyword int maxsize_condition:
+ Optional conditional header. The max length in bytes permitted for
+ the append blob. If the Append Block operation would cause the blob
+ to exceed that limit or if the blob size is already greater than the
+ value specified in this header, the request will fail with
+ MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+ :keyword int appendpos_condition:
+ Optional conditional header, used only for the Append Block operation.
+ A number indicating the byte offset to compare. Append Block will
+ succeed only if the append position is equal to this number. If it
+ is not, the request will fail with the
+ AppendPositionConditionNotMet error
+ (HTTP status code 412 - Precondition Failed).
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The destination match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the source resource has been modified since the specified time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the source resource has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ """
+ options = self._append_block_from_url_options(
+ copy_source_url=self._encode_source_url(copy_source_url),
+ source_offset=source_offset,
+ source_length=source_length,
+ **kwargs
+ )
+ try:
+ return self._client.append_blob.append_block_from_url(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
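+ # Hedged usage sketch (illustrative, hypothetical `source_url`): committing the
+ # first 512 bytes of a source blob to the end of this append blob; the source
+ # must be publicly readable or carry a SAS token in the URL.
+ #
+ #     dest_client.append_block_from_url(source_url, source_offset=0, source_length=512)
+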
+ def _seal_append_blob_options(self, **kwargs):
+ # type: (...) -> Dict[str, Any]
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+
+ appendpos_condition = kwargs.pop('appendpos_condition', None)
+ append_conditions = None
+ if appendpos_condition is not None:
+ append_conditions = AppendPositionAccessConditions(
+ append_position=appendpos_condition
+ )
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+
+ options = {
+ 'timeout': kwargs.pop('timeout', None),
+ 'lease_access_conditions': access_conditions,
+ 'append_position_access_conditions': append_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def seal_append_blob(self, **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime, int]]
+ """The Seal operation seals the Append Blob to make it read-only.
+
+ .. versionadded:: 12.4.0
+
+ :keyword int appendpos_condition:
+ Optional conditional header. A number indicating the byte offset to compare.
+ The Seal operation will succeed only if the append position is equal to this
+ number. If it is not, the request will fail with the AppendPositionConditionNotMet error
+ (HTTP status code 412 - Precondition Failed).
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+ :rtype: dict(str, Any)
+ """
+ options = self._seal_append_blob_options(**kwargs)
+ try:
+ return self._client.append_blob.seal(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
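+
+ # Hedged usage sketch (illustrative): once the last block has been appended,
+ # sealing makes the append blob read-only and further Append Block calls fail.
+ #
+ #     blob_client.append_block(b"final line\n")
+ #     blob_client.seal_append_blob()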
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_blob_service_client.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_blob_service_client.py
new file mode 100644
index 00000000000..1ab68c1f0fe
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_blob_service_client.py
@@ -0,0 +1,691 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import functools
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, Iterable, Dict, List,
+ TYPE_CHECKING
+)
+
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse # type: ignore
+
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import Pipeline
+from azure.core.tracing.decorator import distributed_trace
+
+from ._shared.models import LocationMode
+from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query
+from ._shared.parser import _to_utc_datetime
+from ._shared.response_handlers import return_response_headers, process_storage_error, \
+ parse_to_internal_user_delegation_key
+from ._generated import AzureBlobStorage, VERSION
+from ._generated.models import StorageErrorException, StorageServiceProperties, KeyInfo
+from ._container_client import ContainerClient
+from ._blob_client import BlobClient
+from ._models import ContainerPropertiesPaged, FilteredBlobPaged
+from ._serialize import get_api_version
+from ._deserialize import service_stats_deserialize, service_properties_deserialize
+
+if TYPE_CHECKING:
+ from datetime import datetime
+ from azure.core.pipeline.transport import HttpTransport
+ from azure.core.pipeline.policies import HTTPPolicy
+ from ._shared.models import UserDelegationKey
+ from ._lease import BlobLeaseClient
+ from ._models import (
+ ContainerProperties,
+ BlobProperties,
+ PublicAccess,
+ BlobAnalyticsLogging,
+ Metrics,
+ CorsRule,
+ RetentionPolicy,
+ StaticWebsite,
+ )
+
+
+class BlobServiceClient(StorageAccountHostsMixin):
+ """A client to interact with the Blob Service at the account level.
+
+ This client provides operations to retrieve and configure the account properties
+ as well as list, create and delete containers within the account.
+ For operations relating to a specific container or blob, clients for those entities
+ can also be retrieved using the `get_client` functions.
+
+ :param str account_url:
+ The URL to the blob storage account. Any other entities included
+ in the URL path (e.g. container or blob) will be discarded. This URL can be optionally
+ authenticated with a SAS token.
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the URL already has a SAS token, specifying an explicit credential will take priority.
+ :keyword str api_version:
+ The Storage API version to use for requests. Default value is '2019-07-07'.
+ Setting to an older version may result in reduced feature compatibility.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str secondary_hostname:
+ The hostname of the secondary endpoint.
+ :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+ Defaults to 4*1024*1024, or 4MB.
+ :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+ uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+ the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+ :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+ algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+ :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+ :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+ :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
+ the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
+ :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+ or 4MB.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_authentication.py
+ :start-after: [START create_blob_service_client]
+ :end-before: [END create_blob_service_client]
+ :language: python
+ :dedent: 8
+ :caption: Creating the BlobServiceClient with account url and credential.
+
+ .. literalinclude:: ../samples/blob_samples_authentication.py
+ :start-after: [START create_blob_service_client_oauth]
+ :end-before: [END create_blob_service_client_oauth]
+ :language: python
+ :dedent: 8
+ :caption: Creating the BlobServiceClient with Azure Identity credentials.
+ """
+
+ def __init__(
+ self, account_url, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ try:
+ if not account_url.lower().startswith('http'):
+ account_url = "https://" + account_url
+ except AttributeError:
+ raise ValueError("Account URL must be a string.")
+ parsed_url = urlparse(account_url.rstrip('/'))
+ if not parsed_url.netloc:
+ raise ValueError("Invalid URL: {}".format(account_url))
+
+ _, sas_token = parse_query(parsed_url.query)
+ self._query_str, credential = self._format_query_string(sas_token, credential)
+ super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
+ self._client = AzureBlobStorage(self.url, pipeline=self._pipeline)
+ self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access
+
+ def _format_url(self, hostname):
+ """Format the endpoint URL according to the current location
+ mode hostname.
+ """
+ return "{}://{}/{}".format(self.scheme, hostname, self._query_str)
+
+ @classmethod
+ def from_connection_string(
+ cls, conn_str, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ): # type: (...) -> BlobServiceClient
+ """Create BlobServiceClient from a Connection String.
+
+ :param str conn_str:
+ A connection string to an Azure Storage account.
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token, or the connection string already has shared
+ access key values. The value can be a SAS token string, an account shared access
+ key, or an instance of a TokenCredentials class from azure.identity.
+ Credentials provided here will take precedence over those in the connection string.
+ :returns: A Blob service client.
+ :rtype: ~azure.storage.blob.BlobServiceClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_authentication.py
+ :start-after: [START auth_from_connection_string]
+ :end-before: [END auth_from_connection_string]
+ :language: python
+ :dedent: 8
+ :caption: Creating the BlobServiceClient from a connection string.
+ """
+ account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+ if 'secondary_hostname' not in kwargs:
+ kwargs['secondary_hostname'] = secondary
+ return cls(account_url, credential=credential, **kwargs)
+
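+ # Hedged usage sketch (illustrative; `conn_str` is a hypothetical storage account
+ # connection string obtained from the portal or the CLI):
+ #
+ #     service = BlobServiceClient.from_connection_string(conn_str)
+ #     print(service.get_account_information()["sku_name"])
+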
+ @distributed_trace
+ def get_user_delegation_key(self, key_start_time, # type: datetime
+ key_expiry_time, # type: datetime
+ **kwargs # type: Any
+ ):
+ # type: (...) -> UserDelegationKey
+ """
+ Obtain a user delegation key for the purpose of signing SAS tokens.
+ A token credential must be present on the service object for this request to succeed.
+
+ :param ~datetime.datetime key_start_time:
+ A DateTime value. Indicates when the key becomes valid.
+ :param ~datetime.datetime key_expiry_time:
+ A DateTime value. Indicates when the key stops being valid.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: The user delegation key.
+ :rtype: ~azure.storage.blob.UserDelegationKey
+ """
+ key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time))
+ timeout = kwargs.pop('timeout', None)
+ try:
+ user_delegation_key = self._client.service.get_user_delegation_key(key_info=key_info,
+ timeout=timeout,
+ **kwargs) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore
+
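+ # Hedged usage sketch (illustrative): `service` is assumed to be a BlobServiceClient
+ # constructed with a token credential such as azure.identity.DefaultAzureCredential;
+ # the returned key can then be passed to generate_blob_sas / generate_container_sas
+ # in place of an account key.
+ #
+ #     from datetime import datetime, timedelta
+ #     start = datetime.utcnow()
+ #     delegation_key = service.get_user_delegation_key(start, start + timedelta(hours=1))
+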
+ @distributed_trace
+ def get_account_information(self, **kwargs):
+ # type: (Any) -> Dict[str, str]
+ """Gets information related to the storage account.
+
+ The information can also be retrieved if the user has a SAS to a container or blob.
+ The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+ :returns: A dict of account information (SKU and account type).
+ :rtype: dict(str, str)
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service.py
+ :start-after: [START get_blob_service_account_info]
+ :end-before: [END get_blob_service_account_info]
+ :language: python
+ :dedent: 8
+ :caption: Getting account information for the blob service.
+ """
+ try:
+ return self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace
+ def get_service_stats(self, **kwargs):
+ # type: (**Any) -> Dict[str, Any]
+ """Retrieves statistics related to replication for the Blob service.
+
+ It is only available when read-access geo-redundant replication is enabled for
+ the storage account.
+
+ With geo-redundant replication, Azure Storage maintains your data durable
+ in two locations. In both locations, Azure Storage constantly maintains
+ multiple healthy replicas of your data. The location where you read,
+ create, update, or delete data is the primary storage account location.
+ The primary location exists in the region you choose at the time you
+ create an account via the Azure portal, for
+ example, North Central US. The location to which your data is replicated
+ is the secondary location. The secondary location is automatically
+ determined based on the location of the primary; it is in a second data
+ center that resides in the same region as the primary location. Read-only
+ access is available from the secondary location, if read-access geo-redundant
+ replication is enabled for your storage account.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: The blob service stats.
+ :rtype: Dict[str, Any]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service.py
+ :start-after: [START get_blob_service_stats]
+ :end-before: [END get_blob_service_stats]
+ :language: python
+ :dedent: 8
+ :caption: Getting service stats for the blob service.
+ """
+ timeout = kwargs.pop('timeout', None)
+ try:
+ stats = self._client.service.get_statistics( # type: ignore
+ timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
+ return service_stats_deserialize(stats)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace
+ def get_service_properties(self, **kwargs):
+ # type: (Any) -> Dict[str, Any]
+ """Gets the properties of a storage account's Blob service, including
+ Azure Storage Analytics.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An object containing blob service properties such as
+ analytics logging, hour/minute metrics, cors rules, etc.
+ :rtype: Dict[str, Any]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service.py
+ :start-after: [START get_blob_service_properties]
+ :end-before: [END get_blob_service_properties]
+ :language: python
+ :dedent: 8
+ :caption: Getting service properties for the blob service.
+ """
+ timeout = kwargs.pop('timeout', None)
+ try:
+ service_props = self._client.service.get_properties(timeout=timeout, **kwargs)
+ return service_properties_deserialize(service_props)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace
+ def set_service_properties(
+ self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging]
+ hour_metrics=None, # type: Optional[Metrics]
+ minute_metrics=None, # type: Optional[Metrics]
+ cors=None, # type: Optional[List[CorsRule]]
+ target_version=None, # type: Optional[str]
+ delete_retention_policy=None, # type: Optional[RetentionPolicy]
+ static_website=None, # type: Optional[StaticWebsite]
+ **kwargs
+ ):
+ # type: (...) -> None
+ """Sets the properties of a storage account's Blob service, including
+ Azure Storage Analytics.
+
+ If an element (e.g. analytics_logging) is left as None, the
+ existing settings on the service for that functionality are preserved.
+
+ :param analytics_logging:
+ Groups the Azure Analytics Logging settings.
+ :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging
+ :param hour_metrics:
+ The hour metrics settings provide a summary of request
+ statistics grouped by API in hourly aggregates for blobs.
+ :type hour_metrics: ~azure.storage.blob.Metrics
+ :param minute_metrics:
+ The minute metrics settings provide request statistics
+ for each minute for blobs.
+ :type minute_metrics: ~azure.storage.blob.Metrics
+ :param cors:
+ You can include up to five CorsRule elements in the
+ list. If an empty list is specified, all CORS rules will be deleted,
+ and CORS will be disabled for the service.
+ :type cors: list[~azure.storage.blob.CorsRule]
+ :param str target_version:
+ Indicates the default version to use for requests if an incoming
+ request's version is not specified.
+ :param delete_retention_policy:
+ The delete retention policy specifies whether to retain deleted blobs.
+ It also specifies the number of days and versions of blob to keep.
+ :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy
+ :param static_website:
+ Specifies whether the static website feature is enabled,
+ and if yes, indicates the index document and 404 error document to use.
+ :type static_website: ~azure.storage.blob.StaticWebsite
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service.py
+ :start-after: [START set_blob_service_properties]
+ :end-before: [END set_blob_service_properties]
+ :language: python
+ :dedent: 8
+ :caption: Setting service properties for the blob service.
+ """
+ if all(parameter is None for parameter in [
+ analytics_logging, hour_metrics, minute_metrics, cors,
+ target_version, delete_retention_policy, static_website]):
+ raise ValueError("set_service_properties should be called with at least one parameter")
+
+ props = StorageServiceProperties(
+ logging=analytics_logging,
+ hour_metrics=hour_metrics,
+ minute_metrics=minute_metrics,
+ cors=cors,
+ default_service_version=target_version,
+ delete_retention_policy=delete_retention_policy,
+ static_website=static_website
+ )
+ timeout = kwargs.pop('timeout', None)
+ try:
+ self._client.service.set_properties(props, timeout=timeout, **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
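+ # Hedged usage sketch (illustrative): enabling blob soft delete for seven days
+ # while leaving every other service property untouched (None values are preserved).
+ #
+ #     from azure.storage.blob import RetentionPolicy
+ #     service.set_service_properties(
+ #         delete_retention_policy=RetentionPolicy(enabled=True, days=7))
+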
+ @distributed_trace
+ def list_containers(
+ self, name_starts_with=None, # type: Optional[str]
+ include_metadata=False, # type: Optional[bool]
+ **kwargs
+ ):
+ # type: (...) -> ItemPaged[ContainerProperties]
+ """Returns a generator to list the containers under the specified account.
+
+ The generator will lazily follow the continuation tokens returned by
+ the service and stop when all containers have been returned.
+
+ :param str name_starts_with:
+ Filters the results to return only containers whose names
+ begin with the specified prefix.
+ :param bool include_metadata:
+ Specifies that container metadata be returned in the response.
+ The default value is `False`.
+ :keyword bool include_deleted:
+ Specifies that deleted containers be returned in the response. This is only for accounts with
+ container restore (soft delete) enabled. The default value is `False`.
+ .. versionadded:: 12.4.0
+ :keyword int results_per_page:
+ The maximum number of container names to retrieve per API
+ call. If the request does not specify, the server will return up to 5,000 items.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An iterable (auto-paging) of ContainerProperties.
+ :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.ContainerProperties]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service.py
+ :start-after: [START bsc_list_containers]
+ :end-before: [END bsc_list_containers]
+ :language: python
+ :dedent: 12
+ :caption: Listing the containers in the blob service.
+ """
+ include = ['metadata'] if include_metadata else []
+ include_deleted = kwargs.pop('include_deleted', None)
+ if include_deleted:
+ include.append("deleted")
+
+ timeout = kwargs.pop('timeout', None)
+ results_per_page = kwargs.pop('results_per_page', None)
+ command = functools.partial(
+ self._client.service.list_containers_segment,
+ prefix=name_starts_with,
+ include=include,
+ timeout=timeout,
+ **kwargs)
+ return ItemPaged(
+ command,
+ prefix=name_starts_with,
+ results_per_page=results_per_page,
+ page_iterator_class=ContainerPropertiesPaged
+ )
+
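+ # Hedged usage sketch (illustrative prefix): listing containers whose names start
+ # with "logs", including soft-deleted ones on a container-restore enabled account.
+ #
+ #     for container in service.list_containers(name_starts_with="logs", include_deleted=True):
+ #         print(container.name)
+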
+ @distributed_trace
+ def find_blobs_by_tags(self, filter_expression, **kwargs):
+ # type: (str, **Any) -> ItemPaged[FilteredBlob]
+ """The Filter Blobs operation enables callers to list blobs across all
+ containers whose tags match a given search expression. Filter blobs
+ searches across all containers within a storage account but can be
+ scoped within the expression to a single container.
+
+ :param str filter_expression:
+ The expression to find blobs whose tags match the specified condition.
+ eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'"
+ To specify a container, eg. "@container='containerName' and \"Name\"='C'"
+ :keyword int results_per_page:
+ The maximum number of results per page when paginating.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An iterable (auto-paging) response of FilteredBlob.
+ :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.FilteredBlob]
+ """
+
+ results_per_page = kwargs.pop('results_per_page', None)
+ timeout = kwargs.pop('timeout', None)
+ command = functools.partial(
+ self._client.service.filter_blobs,
+ where=filter_expression,
+ timeout=timeout,
+ **kwargs)
+ return ItemPaged(
+ command, results_per_page=results_per_page,
+ page_iterator_class=FilteredBlobPaged)
+
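+ # Hedged usage sketch (illustrative tag names): finding blobs across the account
+ # whose tags match a filter expression.
+ #
+ #     for filtered in service.find_blobs_by_tags("\"project\"='demo'"):
+ #         print(filtered.container_name, filtered.name)
+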
+ @distributed_trace
+ def create_container(
+ self, name, # type: str
+ metadata=None, # type: Optional[Dict[str, str]]
+ public_access=None, # type: Optional[Union[PublicAccess, str]]
+ **kwargs
+ ):
+ # type: (...) -> ContainerClient
+ """Creates a new container under the specified account.
+
+ If the container with the same name already exists, a ResourceExistsError will
+ be raised. This method returns a client with which to interact with the newly
+ created container.
+
+ :param str name: The name of the container to create.
+ :param metadata:
+ A dict with name-value pairs to associate with the
+ container as metadata. Example: `{'Category':'test'}`
+ :type metadata: dict(str, str)
+ :param public_access:
+ Possible values include: 'container', 'blob'.
+ :type public_access: str or ~azure.storage.blob.PublicAccess
+ :keyword container_encryption_scope:
+ Specifies the default encryption scope to set on the container and use for
+ all future writes.
+
+ .. versionadded:: 12.2.0
+
+ :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: ~azure.storage.blob.ContainerClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service.py
+ :start-after: [START bsc_create_container]
+ :end-before: [END bsc_create_container]
+ :language: python
+ :dedent: 12
+ :caption: Creating a container in the blob service.
+ """
+ container = self.get_container_client(name)
+ kwargs.setdefault('merge_span', True)
+ timeout = kwargs.pop('timeout', None)
+ container.create_container(
+ metadata=metadata, public_access=public_access, timeout=timeout, **kwargs)
+ return container
+
+ @distributed_trace
+ def delete_container(
+ self, container, # type: Union[ContainerProperties, str]
+ lease=None, # type: Optional[Union[BlobLeaseClient, str]]
+ **kwargs
+ ):
+ # type: (...) -> None
+ """Marks the specified container for deletion.
+
+ The container and any blobs contained within it are later deleted during garbage collection.
+ If the container is not found, a ResourceNotFoundError will be raised.
+
+ :param container:
+ The container to delete. This can either be the name of the container,
+ or an instance of ContainerProperties.
+ :type container: str or ~azure.storage.blob.ContainerProperties
+ :param lease:
+ If specified, delete_container only succeeds if the
+ container's lease is active and matches this ID.
+ Required if the container has an active lease.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service.py
+ :start-after: [START bsc_delete_container]
+ :end-before: [END bsc_delete_container]
+ :language: python
+ :dedent: 12
+ :caption: Deleting a container in the blob service.
+ """
+ container = self.get_container_client(container) # type: ignore
+ kwargs.setdefault('merge_span', True)
+ timeout = kwargs.pop('timeout', None)
+ container.delete_container( # type: ignore
+ lease=lease,
+ timeout=timeout,
+ **kwargs)
+
+ @distributed_trace
+ def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs):
+ # type: (str, str, **Any) -> ContainerClient
+ """Restores a soft-deleted container.
+
+ Operation will only be successful if used within the specified number of days
+ set in the delete retention policy.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2019-12-12'.
+
+ :param str deleted_container_name:
+ Specifies the name of the deleted container to restore.
+ :param str deleted_container_version:
+ Specifies the version of the deleted container to restore.
+ :keyword str new_name:
+ The new name for the deleted container to be restored to.
+ If not specified, deleted_container_name will be used as the restored container name.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: ~azure.storage.blob.ContainerClient
+ """
+ new_name = kwargs.pop('new_name', None)
+ container = self.get_container_client(new_name or deleted_container_name)
+ try:
+ container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access
+ deleted_container_version=deleted_container_version,
+ timeout=kwargs.pop('timeout', None), **kwargs)
+ return container
+ except StorageErrorException as error:
+ process_storage_error(error)
+
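+ # Hedged usage sketch (illustrative container name): locating a soft-deleted
+ # container via list_containers(include_deleted=True) and restoring it by name
+ # and version; `version` identifies the particular deleted instance.
+ #
+ #     for item in service.list_containers(include_deleted=True):
+ #         if item.deleted and item.name == "mycontainer":
+ #             restored_client = service.undelete_container(item.name, item.version)
+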
+ def get_container_client(self, container):
+ # type: (Union[ContainerProperties, str]) -> ContainerClient
+ """Get a client to interact with the specified container.
+
+ The container need not already exist.
+
+ :param container:
+ The container. This can either be the name of the container,
+ or an instance of ContainerProperties.
+ :type container: str or ~azure.storage.blob.ContainerProperties
+ :returns: A ContainerClient.
+ :rtype: ~azure.storage.blob.ContainerClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service.py
+ :start-after: [START bsc_get_container_client]
+ :end-before: [END bsc_get_container_client]
+ :language: python
+ :dedent: 8
+ :caption: Getting the container client to interact with a specific container.
+ """
+ try:
+ container_name = container.name
+ except AttributeError:
+ container_name = container
+ _pipeline = Pipeline(
+ transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return ContainerClient(
+ self.url, container_name=container_name,
+ credential=self.credential, api_version=self.api_version, _configuration=self._config,
+ _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+ require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
+
+ def get_blob_client(
+ self, container, # type: Union[ContainerProperties, str]
+ blob, # type: Union[BlobProperties, str]
+ snapshot=None # type: Optional[Union[Dict[str, Any], str]]
+ ):
+ # type: (...) -> BlobClient
+ """Get a client to interact with the specified blob.
+
+ The blob need not already exist.
+
+ :param container:
+ The container that the blob is in. This can either be the name of the container,
+ or an instance of ContainerProperties.
+ :type container: str or ~azure.storage.blob.ContainerProperties
+ :param blob:
+ The blob with which to interact. This can either be the name of the blob,
+ or an instance of BlobProperties.
+ :type blob: str or ~azure.storage.blob.BlobProperties
+ :param snapshot:
+ The optional blob snapshot on which to operate. This can either be the ID of the snapshot,
+ or a dictionary output returned by :func:`~azure.storage.blob.BlobClient.create_snapshot()`.
+ :type snapshot: str or dict(str, Any)
+ :returns: A BlobClient.
+ :rtype: ~azure.storage.blob.BlobClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service.py
+ :start-after: [START bsc_get_blob_client]
+ :end-before: [END bsc_get_blob_client]
+ :language: python
+ :dedent: 12
+ :caption: Getting the blob client to interact with a specific blob.
+ """
+ try:
+ container_name = container.name
+ except AttributeError:
+ container_name = container
+ try:
+ blob_name = blob.name
+ except AttributeError:
+ blob_name = blob
+ _pipeline = Pipeline(
+ transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return BlobClient( # type: ignore
+ self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot,
+ credential=self.credential, api_version=self.api_version, _configuration=self._config,
+ _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+ require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
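+
+ # Hedged usage sketch (illustrative names): the client factories reuse the service
+ # client's pipeline and credentials, so no request is sent until an operation runs.
+ #
+ #     container_client = service.get_container_client("mycontainer")
+ #     blob_client = service.get_blob_client("mycontainer", "data/report.csv")
+ #     print(blob_client.url)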
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_container_client.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_container_client.py
new file mode 100644
index 00000000000..ba327c2498e
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_container_client.py
@@ -0,0 +1,1448 @@
+# pylint: disable=too-many-lines
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import functools
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, Iterable, AnyStr, Dict, List, Tuple, IO, Iterator,
+ TYPE_CHECKING
+)
+
+try:
+ from urllib.parse import urlparse, quote, unquote
+except ImportError:
+ from urlparse import urlparse # type: ignore
+ from urllib2 import quote, unquote # type: ignore
+
+import six
+
+from azure.core import MatchConditions
+from azure.core.paging import ItemPaged
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.pipeline import Pipeline
+from azure.core.pipeline.transport import HttpRequest
+
+from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query
+from ._shared.request_handlers import add_metadata_headers, serialize_iso
+from ._shared.response_handlers import (
+ process_storage_error,
+ return_response_headers,
+ return_headers_and_deserialized)
+from ._generated import AzureBlobStorage, VERSION
+from ._generated.models import (
+ StorageErrorException,
+ SignedIdentifier)
+from ._deserialize import deserialize_container_properties
+from ._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions
+from ._models import ( # pylint: disable=unused-import
+ ContainerProperties,
+ BlobProperties,
+ BlobType)
+from ._list_blobs_helper import BlobPrefix, BlobPropertiesPaged
+from ._lease import BlobLeaseClient
+from ._blob_client import BlobClient
+
+if TYPE_CHECKING:
+ from azure.core.pipeline.transport import HttpTransport, HttpResponse # pylint: disable=ungrouped-imports
+ from azure.core.pipeline.policies import HTTPPolicy # pylint: disable=ungrouped-imports
+ from datetime import datetime
+ from ._models import ( # pylint: disable=unused-import
+ PublicAccess,
+ AccessPolicy,
+ ContentSettings,
+ StandardBlobTier,
+ PremiumPageBlobTier)
+
+
+def _get_blob_name(blob):
+ """Return the blob name.
+
+ :param blob: A blob string or BlobProperties
+ :rtype: str
+ """
+ try:
+ return blob.get('name')
+ except AttributeError:
+ return blob
+
+
+class ContainerClient(StorageAccountHostsMixin):
+ """A client to interact with a specific container, although that container
+ may not yet exist.
+
+ For operations relating to a specific blob within this container, a blob client can be
+ retrieved using the :func:`~get_blob_client` function.
+
+ :param str account_url:
+ The URI to the storage account. In order to create a client given the full URI to the container,
+ use the :func:`from_container_url` classmethod.
+ :param container_name:
+ The name of the container for the blob.
+ :type container_name: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the URL already has a SAS token, specifying an explicit credential will take priority.
+ :keyword str api_version:
+ The Storage API version to use for requests. Default value is '2019-07-07'.
+ Setting to an older version may result in reduced feature compatibility.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str secondary_hostname:
+ The hostname of the secondary endpoint.
+ :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+ Defaults to 4*1024*1024, or 4MB.
+ :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+ uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+ the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+ :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+ algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+ :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+ :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+ :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
+ the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
+ :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+ or 4MB.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers.py
+ :start-after: [START create_container_client_from_service]
+ :end-before: [END create_container_client_from_service]
+ :language: python
+ :dedent: 8
+ :caption: Get a ContainerClient from an existing BlobServiceClient.
+
+ .. literalinclude:: ../samples/blob_samples_containers.py
+ :start-after: [START create_container_client_sasurl]
+ :end-before: [END create_container_client_sasurl]
+ :language: python
+ :dedent: 8
+ :caption: Creating the container client directly.
+ """
+ def __init__(
+ self, account_url, # type: str
+ container_name, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ try:
+ if not account_url.lower().startswith('http'):
+ account_url = "https://" + account_url
+ except AttributeError:
+ raise ValueError("Container URL must be a string.")
+ parsed_url = urlparse(account_url.rstrip('/'))
+ if not container_name:
+ raise ValueError("Please specify a container name.")
+ if not parsed_url.netloc:
+ raise ValueError("Invalid URL: {}".format(account_url))
+
+ _, sas_token = parse_query(parsed_url.query)
+ self.container_name = container_name
+ self._query_str, credential = self._format_query_string(sas_token, credential)
+ super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
+ self._client = AzureBlobStorage(self.url, pipeline=self._pipeline)
+ self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access
+
+ def _format_url(self, hostname):
+ container_name = self.container_name
+ if isinstance(container_name, six.text_type):
+ container_name = container_name.encode('UTF-8')
+ return "{}://{}/{}{}".format(
+ self.scheme,
+ hostname,
+ quote(container_name),
+ self._query_str)
+
+ @classmethod
+ def from_container_url(cls, container_url, credential=None, **kwargs):
+ # type: (str, Optional[Any], Any) -> ContainerClient
+ """Create ContainerClient from a container url.
+
+ :param str container_url:
+ The full endpoint URL to the Container, including SAS token if used. This could be
+ either the primary endpoint, or the secondary endpoint depending on the current `location_mode`.
+ :type container_url: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token, or the connection string already has shared
+ access key values. The value can be a SAS token string, an account shared access
+ key, or an instance of a TokenCredentials class from azure.identity.
+ Credentials provided here will take precedence over those in the connection string.
+ :returns: A container client.
+ :rtype: ~azure.storage.blob.ContainerClient
+ """
+ try:
+ if not container_url.lower().startswith('http'):
+ container_url = "https://" + container_url
+ except AttributeError:
+ raise ValueError("Container URL must be a string.")
+ parsed_url = urlparse(container_url.rstrip('/'))
+ if not parsed_url.netloc:
+ raise ValueError("Invalid URL: {}".format(container_url))
+
+ container_path = parsed_url.path.lstrip('/').split('/')
+ account_path = ""
+ if len(container_path) > 1:
+ account_path = "/" + "/".join(container_path[:-1])
+ account_url = "{}://{}{}?{}".format(
+ parsed_url.scheme,
+ parsed_url.netloc.rstrip('/'),
+ account_path,
+ parsed_url.query)
+ container_name = unquote(container_path[-1])
+ if not container_name:
+ raise ValueError("Invalid URL. Please provide a URL with a valid container name")
+ return cls(account_url, container_name=container_name, credential=credential, **kwargs)
+
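+ # Hedged usage sketch (hypothetical SAS URL, secrets elided): building a client
+ # directly from a container URL that already carries a SAS token.
+ #
+ #     container = ContainerClient.from_container_url(
+ #         "https://myaccount.blob.core.windows.net/mycontainer?sv=...&sig=...")
+ #     props = container.get_container_properties()
+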
+ @classmethod
+ def from_connection_string(
+ cls, conn_str, # type: str
+ container_name, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ): # type: (...) -> ContainerClient
+ """Create ContainerClient from a Connection String.
+
+ :param str conn_str:
+ A connection string to an Azure Storage account.
+ :param container_name:
+ The container name for the blob.
+ :type container_name: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token, or the connection string already has shared
+ access key values. The value can be a SAS token string, an account shared access
+ key, or an instance of a TokenCredentials class from azure.identity.
+ Credentials provided here will take precedence over those in the connection string.
+ :returns: A container client.
+ :rtype: ~azure.storage.blob.ContainerClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_authentication.py
+ :start-after: [START auth_from_connection_string_container]
+ :end-before: [END auth_from_connection_string_container]
+ :language: python
+ :dedent: 8
+ :caption: Creating the ContainerClient from a connection string.
+ """
+ account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+ if 'secondary_hostname' not in kwargs:
+ kwargs['secondary_hostname'] = secondary
+ return cls(
+ account_url, container_name=container_name, credential=credential, **kwargs)
+
+ @distributed_trace
+ def create_container(self, metadata=None, public_access=None, **kwargs):
+ # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None
+ """
+ Creates a new container under the specified account. If the container
+ with the same name already exists, the operation fails.
+
+ :param metadata:
+ A dict with name-value pairs to associate with the
+ container as metadata. Example: `{'Category':'test'}`
+ :type metadata: dict[str, str]
+ :param ~azure.storage.blob.PublicAccess public_access:
+ Possible values include: 'container', 'blob'.
+ :keyword container_encryption_scope:
+ Specifies the default encryption scope to set on the container and use for
+ all future writes.
+
+ .. versionadded:: 12.2.0
+
+ :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers.py
+ :start-after: [START create_container]
+ :end-before: [END create_container]
+ :language: python
+ :dedent: 12
+ :caption: Creating a container to store blobs.
+ """
+ headers = kwargs.pop('headers', {})
+ timeout = kwargs.pop('timeout', None)
+ headers.update(add_metadata_headers(metadata)) # type: ignore
+ container_cpk_scope_info = get_container_cpk_scope_info(kwargs)
+ try:
+ return self._client.container.create( # type: ignore
+ timeout=timeout,
+ access=public_access,
+ container_cpk_scope_info=container_cpk_scope_info,
+ cls=return_response_headers,
+ headers=headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace
+ def delete_container(
+ self, **kwargs):
+ # type: (Any) -> None
+ """
+ Marks the specified container for deletion. The container and any blobs
+ contained within it are later deleted during garbage collection.
+
+ :keyword lease:
+ If specified, delete_container only succeeds if the
+ container's lease is active and matches this ID.
+ Required if the container has an active lease.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers.py
+ :start-after: [START delete_container]
+ :end-before: [END delete_container]
+ :language: python
+ :dedent: 12
+ :caption: Delete a container.
+ """
+ lease = kwargs.pop('lease', None)
+ access_conditions = get_access_conditions(lease)
+ mod_conditions = get_modify_conditions(kwargs)
+ timeout = kwargs.pop('timeout', None)
+ try:
+ self._client.container.delete(
+ timeout=timeout,
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace
+ def acquire_lease(
+ self, lease_duration=-1, # type: int
+ lease_id=None, # type: Optional[str]
+ **kwargs):
+ # type: (...) -> BlobLeaseClient
+ """
+ Requests a new lease. If the container does not have an active lease,
+ the Blob service creates a lease on the container and returns a new
+ lease ID.
+
+ :param int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires. A non-infinite lease can be
+ between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change. Default is -1 (infinite lease).
+ :param str lease_id:
+ Proposed lease ID, in a GUID string format. The Blob service returns
+ 400 (Invalid request) if the proposed lease ID is not in the correct format.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A BlobLeaseClient object that can be used in a context manager.
+ :rtype: ~azure.storage.blob.BlobLeaseClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers.py
+ :start-after: [START acquire_lease_on_container]
+ :end-before: [END acquire_lease_on_container]
+ :language: python
+ :dedent: 8
+ :caption: Acquiring a lease on the container.
+ """
+ lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore
+ kwargs.setdefault('merge_span', True)
+ timeout = kwargs.pop('timeout', None)
+ lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs)
+ return lease
+
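+ # Minimal usage sketch (illustrative; ``client`` is assumed to be a ContainerClient
+ # created elsewhere). The returned BlobLeaseClient gates subsequent operations:
+ #
+ #     lease = client.acquire_lease(lease_duration=15)
+ #     try:
+ #         client.set_container_metadata({"locked": "true"}, lease=lease)
+ #     finally:
+ #         lease.release()
+ 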
+ @distributed_trace
+ def get_account_information(self, **kwargs):
+ # type: (**Any) -> Dict[str, str]
+ """Gets information related to the storage account.
+
+ The information can also be retrieved if the user has a SAS to a container or blob.
+ The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+ :returns: A dict of account information (SKU and account type).
+ :rtype: dict(str, str)
+ """
+ try:
+ return self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace
+ def get_container_properties(self, **kwargs):
+ # type: (Any) -> ContainerProperties
+ """Returns all user-defined metadata and system properties for the specified
+ container. The data returned does not include the container's list of blobs.
+
+ :keyword lease:
+ If specified, get_container_properties only succeeds if the
+ container's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Properties for the specified container within a container object.
+ :rtype: ~azure.storage.blob.ContainerProperties
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers.py
+ :start-after: [START get_container_properties]
+ :end-before: [END get_container_properties]
+ :language: python
+ :dedent: 12
+ :caption: Getting properties on the container.
+ """
+ lease = kwargs.pop('lease', None)
+ access_conditions = get_access_conditions(lease)
+ timeout = kwargs.pop('timeout', None)
+ try:
+ response = self._client.container.get_properties(
+ timeout=timeout,
+ lease_access_conditions=access_conditions,
+ cls=deserialize_container_properties,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ response.name = self.container_name
+ return response # type: ignore
+
+ @distributed_trace
+ def set_container_metadata( # type: ignore
+ self, metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets one or more user-defined name-value pairs for the specified
+ container. Each call to this operation replaces all existing metadata
+ attached to the container. To remove all metadata from the container,
+ call this operation with no metadata dict.
+
+ :param metadata:
+ A dict containing name-value pairs to associate with the container as
+ metadata. Example: {'category':'test'}
+ :type metadata: dict[str, str]
+ :keyword lease:
+ If specified, set_container_metadata only succeeds if the
+ container's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Container-updated property dict (Etag and last modified).
+ :rtype: dict[str, str or datetime]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers.py
+ :start-after: [START set_container_metadata]
+ :end-before: [END set_container_metadata]
+ :language: python
+ :dedent: 12
+ :caption: Setting metadata on the container.
+ """
+ headers = kwargs.pop('headers', {})
+ headers.update(add_metadata_headers(metadata))
+ lease = kwargs.pop('lease', None)
+ access_conditions = get_access_conditions(lease)
+ mod_conditions = get_modify_conditions(kwargs)
+ timeout = kwargs.pop('timeout', None)
+ try:
+ return self._client.container.set_metadata( # type: ignore
+ timeout=timeout,
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ headers=headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
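+ # Minimal usage sketch (illustrative; ``client`` is an assumed ContainerClient).
+ # Each call replaces the complete metadata set, so calling with no dict clears it:
+ #
+ #     client.set_container_metadata({"category": "test", "owner": "docs"})
+ #     assert client.get_container_properties().metadata == {"category": "test", "owner": "docs"}
+ #     client.set_container_metadata()  # removes all metadata
+ 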
+ @distributed_trace
+ def get_container_access_policy(self, **kwargs):
+ # type: (Any) -> Dict[str, Any]
+ """Gets the permissions for the specified container.
+ The permissions indicate whether container data may be accessed publicly.
+
+ :keyword lease:
+ If specified, get_container_access_policy only succeeds if the
+ container's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Access policy information in a dict.
+ :rtype: dict[str, Any]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers.py
+ :start-after: [START get_container_access_policy]
+ :end-before: [END get_container_access_policy]
+ :language: python
+ :dedent: 12
+ :caption: Getting the access policy on the container.
+ """
+ lease = kwargs.pop('lease', None)
+ access_conditions = get_access_conditions(lease)
+ timeout = kwargs.pop('timeout', None)
+ try:
+ response, identifiers = self._client.container.get_access_policy(
+ timeout=timeout,
+ lease_access_conditions=access_conditions,
+ cls=return_headers_and_deserialized,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return {
+ 'public_access': response.get('blob_public_access'),
+ 'signed_identifiers': identifiers or []
+ }
+
+ @distributed_trace
+ def set_container_access_policy(
+ self, signed_identifiers, # type: Dict[str, AccessPolicy]
+ public_access=None, # type: Optional[Union[str, PublicAccess]]
+ **kwargs
+ ): # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets the permissions for the specified container or stored access
+ policies that may be used with Shared Access Signatures. The permissions
+ indicate whether blobs in a container may be accessed publicly.
+
+ :param signed_identifiers:
+ A dictionary of access policies to associate with the container. The
+ dictionary may contain up to 5 elements. An empty dictionary
+ will clear the access policies set on the service.
+ :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy]
+ :param ~azure.storage.blob.PublicAccess public_access:
+ Possible values include: 'container', 'blob'.
+ :keyword lease:
+ Required if the container has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A datetime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified date/time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A datetime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Container-updated property dict (Etag and last modified).
+ :rtype: dict[str, str or ~datetime.datetime]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers.py
+ :start-after: [START set_container_access_policy]
+ :end-before: [END set_container_access_policy]
+ :language: python
+ :dedent: 12
+ :caption: Setting access policy on the container.
+ """
+ if len(signed_identifiers) > 5:
+ raise ValueError(
+ 'Too many access policies provided. The server does not support setting '
+ 'more than 5 access policies on a single resource.')
+ identifiers = []
+ for key, value in signed_identifiers.items():
+ if value:
+ value.start = serialize_iso(value.start)
+ value.expiry = serialize_iso(value.expiry)
+ identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore
+ signed_identifiers = identifiers # type: ignore
+ lease = kwargs.pop('lease', None)
+ mod_conditions = get_modify_conditions(kwargs)
+ access_conditions = get_access_conditions(lease)
+ timeout = kwargs.pop('timeout', None)
+ try:
+ return self._client.container.set_access_policy(
+ container_acl=signed_identifiers or None,
+ timeout=timeout,
+ access=public_access,
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
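+ # Minimal usage sketch (illustrative; ``client`` is an assumed ContainerClient).
+ # A stored access policy is keyed by an identifier and carries the permissions,
+ # start and expiry later referenced by SAS tokens:
+ #
+ #     from datetime import datetime, timedelta
+ #     from azure.storage.blob import AccessPolicy, ContainerSasPermissions
+ #
+ #     policy = AccessPolicy(permission=ContainerSasPermissions(read=True),
+ #                           expiry=datetime.utcnow() + timedelta(hours=1),
+ #                           start=datetime.utcnow())
+ #     client.set_container_access_policy(signed_identifiers={"read-only": policy},
+ #                                        public_access="container")
+ 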
+ @distributed_trace
+ def list_blobs(self, name_starts_with=None, include=None, **kwargs):
+ # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> ItemPaged[BlobProperties]
+ """Returns a generator to list the blobs under the specified container.
+ The generator will lazily follow the continuation tokens returned by
+ the service.
+
+ :param str name_starts_with:
+ Filters the results to return only blobs whose names
+ begin with the specified prefix.
+ :param list[str] or str include:
+ Specifies one or more additional datasets to include in the response.
+ Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'tags'.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An iterable (auto-paging) response of BlobProperties.
+ :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers.py
+ :start-after: [START list_blobs_in_container]
+ :end-before: [END list_blobs_in_container]
+ :language: python
+ :dedent: 8
+ :caption: List the blobs in the container.
+ """
+ if include and not isinstance(include, list):
+ include = [include]
+
+ results_per_page = kwargs.pop('results_per_page', None)
+ timeout = kwargs.pop('timeout', None)
+ command = functools.partial(
+ self._client.container.list_blob_flat_segment,
+ include=include,
+ timeout=timeout,
+ **kwargs)
+ return ItemPaged(
+ command, prefix=name_starts_with, results_per_page=results_per_page,
+ page_iterator_class=BlobPropertiesPaged)
+
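+ # Minimal usage sketch (illustrative; ``client`` is an assumed ContainerClient).
+ # The returned pager follows continuation tokens transparently:
+ #
+ #     for blob in client.list_blobs(name_starts_with="logs/", include=["metadata"]):
+ #         print(blob.name, blob.size, blob.metadata)
+ 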
+ @distributed_trace
+ def walk_blobs(
+ self, name_starts_with=None, # type: Optional[str]
+ include=None, # type: Optional[Any]
+ delimiter="/", # type: str
+ **kwargs # type: Optional[Any]
+ ):
+ # type: (...) -> ItemPaged[BlobProperties]
+ """Returns a generator to list the blobs under the specified container.
+ The generator will lazily follow the continuation tokens returned by
+ the service. This operation will list blobs in accordance with a hierarchy,
+ as delimited by the specified delimiter character.
+
+ :param str name_starts_with:
+ Filters the results to return only blobs whose names
+ begin with the specified prefix.
+ :param list[str] include:
+ Specifies one or more additional datasets to include in the response.
+ Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'.
+ :param str delimiter:
+ When the request includes this parameter, the operation returns a BlobPrefix
+ element in the response body that acts as a placeholder for all blobs whose
+ names begin with the same substring up to the appearance of the delimiter
+ character. The delimiter may be a single character or a string.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An iterable (auto-paging) response of BlobProperties.
+ :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties]
+ """
+ if include and not isinstance(include, list):
+ include = [include]
+
+ results_per_page = kwargs.pop('results_per_page', None)
+ timeout = kwargs.pop('timeout', None)
+ command = functools.partial(
+ self._client.container.list_blob_hierarchy_segment,
+ delimiter=delimiter,
+ include=include,
+ timeout=timeout,
+ **kwargs)
+ return BlobPrefix(
+ command,
+ prefix=name_starts_with,
+ results_per_page=results_per_page,
+ delimiter=delimiter)
+
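+ # Minimal usage sketch (illustrative). With the default "/" delimiter, blobs that
+ # share a prefix come back as BlobPrefix entries, giving a directory-style walk:
+ #
+ #     for item in client.walk_blobs(name_starts_with="logs/"):
+ #         # item is either a BlobProperties or a BlobPrefix ("virtual folder")
+ #         print(item.name)
+ 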
+ @distributed_trace
+ def upload_blob(
+ self, name, # type: Union[str, BlobProperties]
+ data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
+ blob_type=BlobType.BlockBlob, # type: Union[str, BlobType]
+ length=None, # type: Optional[int]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs
+ ):
+ # type: (...) -> BlobClient
+ """Creates a new blob from a data source with automatic chunking.
+
+ :param name: The blob with which to interact. If specified, this value will override
+ a blob value specified in the blob URL.
+ :type name: str or ~azure.storage.blob.BlobProperties
+ :param data: The blob data to upload.
+ :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
+ either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
+ :param int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict(str, str)
+ :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+ If True, upload_blob will overwrite the existing data. If set to False, the
+ operation will fail with ResourceExistsError. The exception to the above is with Append
+ blob types: if set to False and the data already exists, an error will not be raised
+ and the data will be appended to the existing blob. If overwrite is set to True, the existing
+ append blob will be deleted and a new one created. Defaults to False.
+ :keyword ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the blob. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https, as https (the default) will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient upload algorithm
+ will not be used, because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword lease:
+ Required if the container has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value,
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+ :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+ A standard blob tier value to set the blob to. For this version of the library,
+ this is only applicable to block blobs on standard storage accounts.
+ :keyword int maxsize_condition:
+ Optional conditional header. The max length in bytes permitted for
+ the append blob. If the Append Block operation would cause the blob
+ to exceed that limit or if the blob size is already greater than the
+ value specified in this header, the request will fail with
+ MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+ :keyword int max_concurrency:
+ Maximum number of parallel connections to use when the blob size exceeds
+ 64MB.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str encoding:
+ Defaults to UTF-8.
+ :returns: A BlobClient to interact with the newly uploaded blob.
+ :rtype: ~azure.storage.blob.BlobClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers.py
+ :start-after: [START upload_blob_to_container]
+ :end-before: [END upload_blob_to_container]
+ :language: python
+ :dedent: 8
+ :caption: Upload blob to the container.
+ """
+ blob = self.get_blob_client(name)
+ kwargs.setdefault('merge_span', True)
+ timeout = kwargs.pop('timeout', None)
+ encoding = kwargs.pop('encoding', 'UTF-8')
+ blob.upload_blob(
+ data,
+ blob_type=blob_type,
+ length=length,
+ metadata=metadata,
+ timeout=timeout,
+ encoding=encoding,
+ **kwargs
+ )
+ return blob
+
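+ # Minimal usage sketch (illustrative; the local path and blob name are placeholders):
+ #
+ #     with open("./data/report.csv", "rb") as source:
+ #         blob_client = client.upload_blob(
+ #             name="reports/report.csv",
+ #             data=source,
+ #             overwrite=True,
+ #             metadata={"origin": "local-upload"})
+ 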
+ @distributed_trace
+ def delete_blob(
+ self, blob, # type: Union[str, BlobProperties]
+ delete_snapshots=None, # type: Optional[str]
+ **kwargs
+ ):
+ # type: (...) -> None
+ """Marks the specified blob or snapshot for deletion.
+
+ The blob is later deleted during garbage collection.
+ Note that in order to delete a blob, you must delete all of its
+ snapshots. You can delete both at the same time with the delete_blob
+ operation.
+
+ If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot
+ and retains it for the specified number of days.
+ After the specified number of days, the blob's data is removed from the service during garbage collection.
+ A soft-deleted blob or snapshot is accessible through :func:`list_blobs()` with the `include=["deleted"]`
+ option. A soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()`.
+
+ :param blob: The blob with which to interact. If specified, this value will override
+ a blob value specified in the blob URL.
+ :type blob: str or ~azure.storage.blob.BlobProperties
+ :param str delete_snapshots:
+ Required if the blob has associated snapshots. Values include:
+ - "only": Deletes only the blob's snapshots.
+ - "include": Deletes the blob along with all snapshots.
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to delete.
+
+ .. versionadded:: 12.4.0
+ This keyword argument was introduced in API version '2019-12-12'.
+
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value,
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ blob_client = self.get_blob_client(blob) # type: ignore
+ kwargs.setdefault('merge_span', True)
+ timeout = kwargs.pop('timeout', None)
+ blob_client.delete_blob( # type: ignore
+ delete_snapshots=delete_snapshots,
+ timeout=timeout,
+ **kwargs)
+
+ @distributed_trace
+ def download_blob(self, blob, offset=None, length=None, **kwargs):
+ # type: (Union[str, BlobProperties], Optional[int], Optional[int], **Any) -> StorageStreamDownloader
+ """Downloads a blob to the StorageStreamDownloader. The readall() method must
+ be used to read all the content or readinto() must be used to download the blob into
+ a stream.
+
+ :param blob: The blob with which to interact. If specified, this value will override
+ a blob value specified in the blob URL.
+ :type blob: str or ~azure.storage.blob.BlobProperties
+ :param int offset:
+ Start of byte range to use for downloading a section of the blob.
+ Must be set if length is provided.
+ :param int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the blob. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https, as https (the default) will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient upload algorithm
+ will not be used because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword lease:
+ Required if the blob has an active lease. If specified, download_blob only
+ succeeds if the blob's lease is active and matches this ID. Value can be a
+ BlobLeaseClient object or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value,
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :keyword str encoding:
+ Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :returns: A streaming object (StorageStreamDownloader)
+ :rtype: ~azure.storage.blob.StorageStreamDownloader
+ """
+ blob_client = self.get_blob_client(blob) # type: ignore
+ kwargs.setdefault('merge_span', True)
+ return blob_client.download_blob(offset=offset, length=length, **kwargs)
+
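+ # Minimal usage sketch (illustrative). readall() buffers the whole blob in memory,
+ # while readinto() streams it into a writable file object:
+ #
+ #     content = client.download_blob("reports/report.csv").readall()
+ #
+ #     with open("./report-copy.csv", "wb") as target:
+ #         client.download_blob("reports/report.csv").readinto(target)
+ 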
+ def _generate_delete_blobs_subrequest_options(
+ self, snapshot=None,
+ delete_snapshots=None,
+ lease_access_conditions=None,
+ modified_access_conditions=None,
+ **kwargs
+ ):
+ """This code is a copy from _generated.
+
+ Once Autorest is able to provide request preparation this code should be removed.
+ """
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ # Construct parameters
+ timeout = kwargs.pop('timeout', None)
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access
+ if timeout is not None:
+ query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access
+
+ # Construct headers
+ header_parameters = {}
+ if delete_snapshots is not None:
+ header_parameters['x-ms-delete-snapshots'] = self._client._serialize.header( # pylint: disable=protected-access
+ "delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._client._serialize.header( # pylint: disable=protected-access
+ "lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._client._serialize.header( # pylint: disable=protected-access
+ "if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._client._serialize.header( # pylint: disable=protected-access
+ "if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._client._serialize.header( # pylint: disable=protected-access
+ "if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._client._serialize.header( # pylint: disable=protected-access
+ "if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access
+
+ return query_parameters, header_parameters
+
+ def _generate_delete_blobs_options(self,
+ *blobs, # type: List[Union[str, BlobProperties, dict]]
+ **kwargs
+ ):
+ timeout = kwargs.pop('timeout', None)
+ raise_on_any_failure = kwargs.pop('raise_on_any_failure', True)
+ delete_snapshots = kwargs.pop('delete_snapshots', None)
+ if_modified_since = kwargs.pop('if_modified_since', None)
+ if_unmodified_since = kwargs.pop('if_unmodified_since', None)
+ if_tags_match_condition = kwargs.pop('if_tags_match_condition', None)
+ kwargs.update({'raise_on_any_failure': raise_on_any_failure,
+ 'sas': self._query_str.replace('?', '&'),
+ 'timeout': '&timeout=' + str(timeout) if timeout else ""
+ })
+
+ reqs = []
+ for blob in blobs:
+ blob_name = _get_blob_name(blob)
+ container_name = self.container_name
+
+ try:
+ options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access
+ snapshot=blob.get('snapshot'),
+ delete_snapshots=delete_snapshots or blob.get('delete_snapshots'),
+ lease=blob.get('lease_id'),
+ if_modified_since=if_modified_since or blob.get('if_modified_since'),
+ if_unmodified_since=if_unmodified_since or blob.get('if_unmodified_since'),
+ etag=blob.get('etag'),
+ if_tags_match_condition=if_tags_match_condition or blob.get('if_tags_match_condition'),
+ match_condition=blob.get('match_condition') or MatchConditions.IfNotModified if blob.get('etag')
+ else None,
+ timeout=blob.get('timeout'),
+ )
+ except AttributeError:
+ options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access
+ delete_snapshots=delete_snapshots,
+ if_modified_since=if_modified_since,
+ if_unmodified_since=if_unmodified_since,
+ if_tags_match_condition=if_tags_match_condition
+ )
+
+ query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options(**options)
+
+ req = HttpRequest(
+ "DELETE",
+ "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str),
+ headers=header_parameters
+ )
+ req.format_parameters(query_parameters)
+ reqs.append(req)
+
+ return reqs, kwargs
+
+ @distributed_trace
+ def delete_blobs(self, *blobs, **kwargs):
+ # type: (...) -> Iterator[HttpResponse]
+ """Marks the specified blobs or snapshots for deletion.
+
+ The blobs are later deleted during garbage collection.
+ Note that in order to delete blobs, you must delete all of their
+ snapshots. You can delete both at the same time with the delete_blobs operation.
+
+ If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots
+ and retains them for the specified number of days.
+ After the specified number of days, the blobs' data is removed from the service during garbage collection.
+ Soft-deleted blobs or snapshots are accessible through :func:`list_blobs()` with the `include=["deleted"]` option.
+ Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()`.
+
+ :param blobs:
+ The blobs to delete. This can be a single blob, or multiple values can
+ be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+ .. note::
+ When a blob is passed as a dict, the following keys and value rules apply.
+
+ blob name:
+ key: 'name', value type: str
+ snapshot you want to delete:
+ key: 'snapshot', value type: str
+ whether to delete snapshots when deleting the blob:
+ key: 'delete_snapshots', value: 'include' or 'only'
+ whether the blob has been modified:
+ key: 'if_modified_since', 'if_unmodified_since', value type: datetime
+ etag:
+ key: 'etag', value type: str
+ match the etag or not:
+ key: 'match_condition', value type: MatchConditions
+ tags match condition:
+ key: 'if_tags_match_condition', value type: str
+ lease:
+ key: 'lease_id', value type: Union[str, LeaseClient]
+ timeout for subrequest:
+ key: 'timeout', value type: int
+
+ :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+ :keyword str delete_snapshots:
+ Required if a blob has associated snapshots. Values include:
+ - "only": Deletes only the blobs' snapshots.
+ - "include": Deletes the blob along with all snapshots.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value,
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword bool raise_on_any_failure:
+ This is a boolean param which defaults to True. When this is set, an exception
+ is raised if any single operation fails.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: An iterator of responses, one for each blob in order
+ :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common.py
+ :start-after: [START delete_multiple_blobs]
+ :end-before: [END delete_multiple_blobs]
+ :language: python
+ :dedent: 8
+ :caption: Deleting multiple blobs.
+ """
+ if len(blobs) == 0:
+ return iter(list())
+
+ reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs)
+
+ return self._batch_send(*reqs, **options)
+
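+ # Minimal usage sketch (illustrative). Blobs may be given as plain names or as dicts
+ # carrying per-blob options such as snapshot handling:
+ #
+ #     client.delete_blobs(
+ #         "logs/2021-05-01.log",
+ #         {"name": "logs/2021-05-02.log", "delete_snapshots": "include"},
+ #         raise_on_any_failure=False)
+ 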
+ def _generate_set_tiers_subrequest_options(
+ self, tier, snapshot=None, version_id=None, rehydrate_priority=None, lease_access_conditions=None, **kwargs
+ ):
+ """This code is a copy from _generated.
+
+ Once Autorest is able to provide request preparation this code should be removed.
+ """
+ if not tier:
+ raise ValueError("A blob tier must be specified")
+ if snapshot and version_id:
+ raise ValueError("Snapshot and version_id cannot be set at the same time")
+ if_tags = kwargs.pop('if_tags', None)
+
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+
+ comp = "tier"
+ timeout = kwargs.pop('timeout', None)
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access
+ if version_id is not None:
+ query_parameters['versionid'] = self._client._serialize.query("version_id", version_id, 'str') # pylint: disable=protected-access
+ if timeout is not None:
+ query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access
+ query_parameters['comp'] = self._client._serialize.query("comp", comp, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-access-tier'] = self._client._serialize.header("tier", tier, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call
+ if rehydrate_priority is not None:
+ header_parameters['x-ms-rehydrate-priority'] = self._client._serialize.header( # pylint: disable=protected-access
+ "rehydrate_priority", rehydrate_priority, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._client._serialize.header("lease_id", lease_id, 'str') # pylint: disable=protected-access
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access
+
+ return query_parameters, header_parameters
+
+ def _generate_set_tiers_options(self,
+ blob_tier, # type: Optional[Union[str, StandardBlobTier, PremiumPageBlobTier]]
+ *blobs, # type: List[Union[str, BlobProperties, dict]]
+ **kwargs
+ ):
+ timeout = kwargs.pop('timeout', None)
+ raise_on_any_failure = kwargs.pop('raise_on_any_failure', True)
+ rehydrate_priority = kwargs.pop('rehydrate_priority', None)
+ if_tags = kwargs.pop('if_tags_match_condition', None)
+ kwargs.update({'raise_on_any_failure': raise_on_any_failure,
+ 'sas': self._query_str.replace('?', '&'),
+ 'timeout': '&timeout=' + str(timeout) if timeout else ""
+ })
+
+ reqs = []
+ for blob in blobs:
+ blob_name = _get_blob_name(blob)
+ container_name = self.container_name
+
+ try:
+ tier = blob_tier or blob.get('blob_tier')
+ query_parameters, header_parameters = self._generate_set_tiers_subrequest_options(
+ tier=tier,
+ snapshot=blob.get('snapshot'),
+ version_id=blob.get('version_id'),
+ rehydrate_priority=rehydrate_priority or blob.get('rehydrate_priority'),
+ lease_access_conditions=blob.get('lease_id'),
+ if_tags=if_tags or blob.get('if_tags_match_condition'),
+ timeout=timeout or blob.get('timeout')
+ )
+ except AttributeError:
+ query_parameters, header_parameters = self._generate_set_tiers_subrequest_options(
+ blob_tier, rehydrate_priority=rehydrate_priority, if_tags=if_tags)
+
+ req = HttpRequest(
+ "PUT",
+ "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str),
+ headers=header_parameters
+ )
+ req.format_parameters(query_parameters)
+ reqs.append(req)
+
+ return reqs, kwargs
+
+ @distributed_trace
+ def set_standard_blob_tier_blobs(
+ self,
+ standard_blob_tier, # type: Optional[Union[str, StandardBlobTier]]
+ *blobs, # type: List[Union[str, BlobProperties, dict]]
+ **kwargs
+ ):
+ # type: (...) -> Iterator[HttpResponse]
+ """This operation sets the tier on block blobs.
+
+ A block blob's tier determines the Hot/Cool/Archive storage type.
+ This operation does not update the blob's ETag.
+
+ :param standard_blob_tier:
+ Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool',
+ 'Archive'. The hot tier is optimized for storing data that is accessed
+ frequently. The cool storage tier is optimized for storing data that
+ is infrequently accessed and stored for at least a month. The archive
+ tier is optimized for storing data that is rarely accessed and stored
+ for at least six months with flexible latency requirements.
+
+ .. note::
+ If you want to set a different tier on different blobs, set this positional parameter to None.
+ The blob tier of each BlobProperties object will then be used.
+
+ :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
+ :param blobs:
+ The blobs with which to interact. This can be a single blob, or multiple values can
+ be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+ .. note::
+ When a blob is passed as a dict, the following keys and value rules apply.
+
+ blob name:
+ key: 'name', value type: str
+ standard blob tier:
+ key: 'blob_tier', value type: StandardBlobTier
+ rehydrate priority:
+ key: 'rehydrate_priority', value type: RehydratePriority
+ lease:
+ key: 'lease_id', value type: Union[str, LeaseClient]
+ snapshot:
+ key: "snapshot", value type: str
+ version id:
+ key: "version_id", value type: str
+ tags match condition:
+ key: 'if_tags_match_condition', value type: str
+ timeout for subrequest:
+ key: 'timeout', value type: int
+
+ :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+ :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+ Indicates the priority with which to rehydrate an archived blob
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value,
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword bool raise_on_any_failure:
+ This is a boolean param which defaults to True. When this is set, an exception
+ is raised if any single operation fails.
+ :return: An iterator of responses, one for each blob in order
+ :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse]
+ """
+ reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs)
+
+ return self._batch_send(*reqs, **options)
+
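+ # Minimal usage sketch (illustrative). Either apply one tier to every blob, or pass
+ # None as the tier and supply a per-blob 'blob_tier' in dicts:
+ #
+ #     client.set_standard_blob_tier_blobs("Cool", "a.txt", "b.txt")
+ #     client.set_standard_blob_tier_blobs(
+ #         None,
+ #         {"name": "a.txt", "blob_tier": "Archive"},
+ #         {"name": "b.txt", "blob_tier": "Hot"})
+ 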
+ @distributed_trace
+ def set_premium_page_blob_tier_blobs(
+ self,
+ premium_page_blob_tier, # type: Optional[Union[str, PremiumPageBlobTier]]
+ *blobs, # type: List[Union[str, BlobProperties, dict]]
+ **kwargs
+ ):
+ # type: (...) -> Iterator[HttpResponse]
+ """Sets the page blob tiers on all blobs. This API is only supported for page blobs on premium accounts.
+
+ :param premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+
+ .. note::
+ If you want to set a different tier on different blobs, set this positional parameter to None.
+ The blob tier of each BlobProperties object will then be used.
+
+ :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+ :param blobs:
+ The blobs with which to interact. This can be a single blob, or multiple values can
+ be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+ .. note::
+ When a blob is passed as a dict, the following keys and value rules apply.
+
+ blob name:
+ key: 'name', value type: str
+ premium blob tier:
+ key: 'blob_tier', value type: PremiumPageBlobTier
+ lease:
+ key: 'lease_id', value type: Union[str, LeaseClient]
+ timeout for subrequest:
+ key: 'timeout', value type: int
+
+ :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :keyword bool raise_on_any_failure:
+ This is a boolean param which defaults to True. When this is set, an exception
+ is raised if any single operation fails.
+ :return: An iterator of responses, one for each blob in order
+ :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse]
+ """
+ reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs)
+
+ return self._batch_send(*reqs, **options)
+
+ def get_blob_client(
+ self, blob, # type: Union[str, BlobProperties]
+ snapshot=None # type: str
+ ):
+ # type: (...) -> BlobClient
+ """Get a client to interact with the specified blob.
+
+ The blob need not already exist.
+
+ :param blob:
+ The blob with which to interact.
+ :type blob: str or ~azure.storage.blob.BlobProperties
+ :param str snapshot:
+ The optional blob snapshot on which to operate. This can be the snapshot ID string
+ or the response returned from :func:`~BlobClient.create_snapshot()`.
+ :returns: A BlobClient.
+ :rtype: ~azure.storage.blob.BlobClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers.py
+ :start-after: [START get_blob_client]
+ :end-before: [END get_blob_client]
+ :language: python
+ :dedent: 8
+ :caption: Get the blob client.
+ """
+ blob_name = _get_blob_name(blob)
+ _pipeline = Pipeline(
+ transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return BlobClient(
+ self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot,
+ credential=self.credential, api_version=self.api_version, _configuration=self._config,
+ _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+ require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
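+ # Minimal usage sketch (illustrative; the blob need not exist yet):
+ #
+ #     blob_client = container_client.get_blob_client("folder/new-blob.txt")
+ #     blob_client.upload_blob(b"hello", overwrite=True)
+ #     print(blob_client.get_blob_properties().last_modified)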
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_deserialize.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_deserialize.py
new file mode 100644
index 00000000000..159e0e676c8
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_deserialize.py
@@ -0,0 +1,158 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+from typing import ( # pylint: disable=unused-import
+ Tuple, Dict, List,
+ TYPE_CHECKING
+)
+
+from ._models import BlobType, CopyProperties, ContentSettings, LeaseProperties, BlobProperties
+from ._shared.models import get_enum_value
+
+from ._shared.response_handlers import deserialize_metadata
+from ._models import ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \
+ StaticWebsite, ObjectReplicationPolicy, ObjectReplicationRule
+
+if TYPE_CHECKING:
+ from ._generated.models import PageList
+
+
+def deserialize_blob_properties(response, obj, headers):
+ blob_properties = BlobProperties(
+ metadata=deserialize_metadata(response, obj, headers),
+ object_replication_source_properties=deserialize_ors_policies(response.headers),
+ **headers
+ )
+ if 'Content-Range' in headers:
+ if 'x-ms-blob-content-md5' in headers:
+ blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5']
+ else:
+ blob_properties.content_settings.content_md5 = None
+ return blob_properties
+
+
+def deserialize_ors_policies(policy_dictionary):
+
+ if policy_dictionary is None:
+ return None
+ # For source blobs (blobs that have policy ids and rule ids applied to them),
+ # the header will be formatted as "x-ms-or-<policy_id>_<rule_id>: {Complete, Failed}".
+ # The value of this header is the status of the replication.
+ or_policy_status_headers = {key: val for key, val in policy_dictionary.items()
+ if 'or-' in key and key != 'x-ms-or-policy-id'}
+
+ parsed_result = {}
+
+ for key, val in or_policy_status_headers.items():
+ # list blobs gives or-policy_rule and get blob properties gives x-ms-or-policy_rule
+ policy_and_rule_ids = key.split('or-')[1].split('_')
+ policy_id = policy_and_rule_ids[0]
+ rule_id = policy_and_rule_ids[1]
+
+ # If we are seeing this policy for the first time, create a new list to store rule_id -> result
+ parsed_result[policy_id] = parsed_result.get(policy_id) or list()
+ parsed_result[policy_id].append(ObjectReplicationRule(rule_id=rule_id, status=val))
+
+ result_list = [ObjectReplicationPolicy(policy_id=k, rules=v) for k, v in parsed_result.items()]
+
+ return result_list
+
+
+def deserialize_blob_stream(response, obj, headers):
+ blob_properties = deserialize_blob_properties(response, obj, headers)
+ obj.properties = blob_properties
+ return response.location_mode, obj
+
+
+def deserialize_container_properties(response, obj, headers):
+ metadata = deserialize_metadata(response, obj, headers)
+ container_properties = ContainerProperties(
+ metadata=metadata,
+ **headers
+ )
+ return container_properties
+
+
+def get_page_ranges_result(ranges):
+ # type: (PageList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+ page_range = [] # type: ignore
+ clear_range = [] # type: List
+ if ranges.page_range:
+ page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range] # type: ignore
+ if ranges.clear_range:
+ clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range]
+ return page_range, clear_range # type: ignore
+
+
+def service_stats_deserialize(generated):
+ """Deserialize a ServiceStats object into a dict.
+ """
+ return {
+ 'geo_replication': {
+ 'status': generated.geo_replication.status,
+ 'last_sync_time': generated.geo_replication.last_sync_time,
+ }
+ }
+
+
+def service_properties_deserialize(generated):
+ """Deserialize a ServiceProperties object into a dict.
+ """
+ return {
+ 'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging), # pylint: disable=protected-access
+ 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access
+ 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access
+ 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access
+ 'target_version': generated.default_service_version, # pylint: disable=protected-access
+ 'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy), # pylint: disable=protected-access
+ 'static_website': StaticWebsite._from_generated(generated.static_website), # pylint: disable=protected-access
+ }
+
+
+def get_blob_properties_from_generated_code(generated):
+ blob = BlobProperties()
+ blob.name = generated.name
+ blob_type = get_enum_value(generated.properties.blob_type)
+ blob.blob_type = BlobType(blob_type) if blob_type else None
+ blob.etag = generated.properties.etag
+ blob.deleted = generated.deleted
+ blob.snapshot = generated.snapshot
+ blob.is_append_blob_sealed = generated.properties.is_sealed
+ blob.metadata = generated.metadata.additional_properties if generated.metadata else {}
+ blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None
+ blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access
+ blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access
+ blob.last_modified = generated.properties.last_modified
+ blob.creation_time = generated.properties.creation_time
+ blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access
+ blob.size = generated.properties.content_length
+ blob.page_blob_sequence_number = generated.properties.blob_sequence_number
+ blob.server_encrypted = generated.properties.server_encrypted
+ blob.encryption_scope = generated.properties.encryption_scope
+ blob.deleted_time = generated.properties.deleted_time
+ blob.remaining_retention_days = generated.properties.remaining_retention_days
+ blob.blob_tier = generated.properties.access_tier
+ blob.rehydrate_priority = generated.properties.rehydrate_priority
+ blob.blob_tier_inferred = generated.properties.access_tier_inferred
+ blob.archive_status = generated.properties.archive_status
+ blob.blob_tier_change_time = generated.properties.access_tier_change_time
+ blob.version_id = generated.version_id
+ blob.is_current_version = generated.is_current_version
+ blob.tag_count = generated.properties.tag_count
+ blob.tags = parse_tags(generated.blob_tags) # pylint: disable=protected-access
+ blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata)
+ blob.last_accessed_on = generated.properties.last_accessed_on
+ return blob
+
+
+def parse_tags(generated_tags):
+ # type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None]
+ """Deserialize a list of BlobTag objects into a dict.
+ """
+ if generated_tags:
+ tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set}
+ return tag_dict
+ return None
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_download.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_download.py
new file mode 100644
index 00000000000..46e59e5d249
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_download.py
@@ -0,0 +1,580 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import sys
+import threading
+import warnings
+from io import BytesIO
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.common import with_current_context
+from ._shared.encryption import decrypt_blob
+from ._shared.request_handlers import validate_and_format_range_headers
+from ._shared.response_handlers import process_storage_error, parse_length_from_content_range
+from ._deserialize import get_page_ranges_result
+
+
+def process_range_and_offset(start_range, end_range, length, encryption):
+ start_offset, end_offset = 0, 0
+ if encryption.get("key") is not None or encryption.get("resolver") is not None:
+ if start_range is not None:
+ # Align the start of the range along a 16 byte block
+ start_offset = start_range % 16
+ start_range -= start_offset
+
+ # Include an extra 16 bytes for the IV if necessary
+ # Because of the previous offsetting, start_range will always
+ # be a multiple of 16.
+ if start_range > 0:
+ start_offset += 16
+ start_range -= 16
+
+ if length is not None:
+ # Align the end of the range along a 16 byte block
+ end_offset = 15 - (end_range % 16)
+ end_range += end_offset
+
+ return (start_range, end_range), (start_offset, end_offset)
+
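+# Worked example (illustrative): for an encrypted blob, a caller-requested range of
+# bytes 100-199 (length 100) is widened to whole 16-byte cipher blocks plus one
+# preceding block for the IV, then trimmed again after decryption:
+#
+#     rng, off = process_range_and_offset(100, 199, 100, {"key": object()})
+#     # rng == (80, 207) -> 128 bytes are requested from the service
+#     # off == (20, 8)   -> 20 bytes dropped from the front and 8 from the back,
+#     #                     leaving the caller's original 100 bytes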
+
+def process_content(data, start_offset, end_offset, encryption):
+ if data is None:
+ raise ValueError("Response cannot be None.")
+ try:
+ content = b"".join(list(data))
+ except Exception as error:
+ raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error)
+    if content and (encryption.get("key") is not None or encryption.get("resolver") is not None):
+ try:
+ return decrypt_blob(
+ encryption.get("required"),
+ encryption.get("key"),
+ encryption.get("resolver"),
+ content,
+ start_offset,
+ end_offset,
+ data.response.headers,
+ )
+ except Exception as error:
+ raise HttpResponseError(message="Decryption failed.", response=data.response, error=error)
+ return content
+
+
+class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes
+ def __init__(
+ self,
+ client=None,
+ non_empty_ranges=None,
+ total_size=None,
+ chunk_size=None,
+ current_progress=None,
+ start_range=None,
+ end_range=None,
+ stream=None,
+ parallel=None,
+ validate_content=None,
+ encryption_options=None,
+ **kwargs
+ ):
+ self.client = client
+ self.non_empty_ranges = non_empty_ranges
+
+ # Information on the download range/chunk size
+ self.chunk_size = chunk_size
+ self.total_size = total_size
+ self.start_index = start_range
+ self.end_index = end_range
+
+ # The destination that we will write to
+ self.stream = stream
+ self.stream_lock = threading.Lock() if parallel else None
+ self.progress_lock = threading.Lock() if parallel else None
+
+ # For a parallel download, the stream is always seekable, so we note down the current position
+ # in order to seek to the right place when out-of-order chunks come in
+ self.stream_start = stream.tell() if parallel else None
+
+ # Download progress so far
+ self.progress_total = current_progress
+
+ # Encryption
+ self.encryption_options = encryption_options
+
+ # Parameters for each get operation
+ self.validate_content = validate_content
+ self.request_options = kwargs
+
+ def _calculate_range(self, chunk_start):
+ if chunk_start + self.chunk_size > self.end_index:
+ chunk_end = self.end_index
+ else:
+ chunk_end = chunk_start + self.chunk_size
+ return chunk_start, chunk_end
+
+ def get_chunk_offsets(self):
+ index = self.start_index
+ while index < self.end_index:
+ yield index
+ index += self.chunk_size
+
+ def process_chunk(self, chunk_start):
+ chunk_start, chunk_end = self._calculate_range(chunk_start)
+ chunk_data = self._download_chunk(chunk_start, chunk_end - 1)
+ length = chunk_end - chunk_start
+ if length > 0:
+ self._write_to_stream(chunk_data, chunk_start)
+ self._update_progress(length)
+
+ def yield_chunk(self, chunk_start):
+ chunk_start, chunk_end = self._calculate_range(chunk_start)
+ return self._download_chunk(chunk_start, chunk_end - 1)
+
+ def _update_progress(self, length):
+ if self.progress_lock:
+ with self.progress_lock: # pylint: disable=not-context-manager
+ self.progress_total += length
+ else:
+ self.progress_total += length
+
+ def _write_to_stream(self, chunk_data, chunk_start):
+ if self.stream_lock:
+ with self.stream_lock: # pylint: disable=not-context-manager
+ self.stream.seek(self.stream_start + (chunk_start - self.start_index))
+ self.stream.write(chunk_data)
+ else:
+ self.stream.write(chunk_data)
+
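+    # Worked example (illustrative): with non_empty_ranges of
+    # [{'start': 0, 'end': 511}, {'start': 4096, 'end': 4607}], a request for
+    # bytes 1024-2047 falls entirely between the two populated ranges, so
+    # _do_optimize returns True and _download_chunk fabricates a zero-filled
+    # chunk locally instead of issuing a GET; a request for bytes 4000-5000
+    # overlaps the second range, so it returns False and the chunk is
+    # downloaded from the service.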
+ def _do_optimize(self, given_range_start, given_range_end):
+ # If we have no page range list stored, then assume there's data everywhere for that page blob
+ # or it's a block blob or append blob
+ if self.non_empty_ranges is None:
+ return False
+
+ for source_range in self.non_empty_ranges:
+ # Case 1: As the range list is sorted, if we've reached such a source_range
+ # we've checked all the appropriate source_range already and haven't found any overlapping.
+ # so the given range doesn't have any data and download optimization could be applied.
+ # given range: | |
+ # source range: | |
+ if given_range_end < source_range['start']: # pylint:disable=no-else-return
+ return True
+ # Case 2: the given range comes after source_range, continue checking.
+ # given range: | |
+ # source range: | |
+ elif source_range['end'] < given_range_start:
+ pass
+ # Case 3: source_range and given range overlap somehow, no need to optimize.
+ else:
+ return False
+ # Went through all src_ranges, but nothing overlapped. Optimization will be applied.
+ return True
+
+ def _download_chunk(self, chunk_start, chunk_end):
+ download_range, offset = process_range_and_offset(
+ chunk_start, chunk_end, chunk_end, self.encryption_options
+ )
+
+        # There is no need to download a chunk from the server if the requested range contains no data.
+        # When the optimization applies, build a zero-filled chunk locally instead.
+ if self._do_optimize(download_range[0], download_range[1]):
+ chunk_data = b"\x00" * self.chunk_size
+ else:
+ range_header, range_validation = validate_and_format_range_headers(
+ download_range[0],
+ download_range[1],
+ check_content_md5=self.validate_content
+ )
+
+ try:
+ _, response = self.client.download(
+ range=range_header,
+ range_get_content_md5=range_validation,
+ validate_content=self.validate_content,
+ data_stream_total=self.total_size,
+ download_stream_current=self.progress_total,
+ **self.request_options
+ )
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ chunk_data = process_content(response, offset[0], offset[1], self.encryption_options)
+
+ # This makes sure that if_match is set so that we can validate
+ # that subsequent downloads are to an unmodified blob
+ if self.request_options.get("modified_access_conditions"):
+ self.request_options["modified_access_conditions"].if_match = response.properties.etag
+
+ return chunk_data
+
+
+class _ChunkIterator(object):
+ """Async iterator for chunks in blob download stream."""
+
+ def __init__(self, size, content, downloader):
+ self.size = size
+ self._current_content = content
+ self._iter_downloader = downloader
+ self._iter_chunks = None
+ self._complete = (size == 0)
+
+ def __len__(self):
+ return self.size
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ """Iterate through responses."""
+ if self._complete:
+ raise StopIteration("Download complete")
+ if not self._iter_downloader:
+ # If no iterator was supplied, the download completed with
+ # the initial GET, so we just return that data
+ self._complete = True
+ return self._current_content
+
+ if not self._iter_chunks:
+ self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+ else:
+ chunk = next(self._iter_chunks)
+ self._current_content = self._iter_downloader.yield_chunk(chunk)
+
+ return self._current_content
+
+ next = __next__ # Python 2 compatibility.
+
+
+class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes
+ """A streaming object to download from Azure Storage.
+
+ :ivar str name:
+ The name of the blob being downloaded.
+ :ivar str container:
+ The name of the container where the blob is.
+ :ivar ~azure.storage.blob.BlobProperties properties:
+ The properties of the blob being downloaded. If only a range of the data is being
+ downloaded, this will be reflected in the properties.
+ :ivar int size:
+ The size of the total data in the stream. This will be the byte range if specified,
+ otherwise the total size of the blob.
+ """
+
+ def __init__(
+ self,
+ clients=None,
+ config=None,
+ start_range=None,
+ end_range=None,
+ validate_content=None,
+ encryption_options=None,
+ max_concurrency=1,
+ name=None,
+ container=None,
+ encoding=None,
+ **kwargs
+ ):
+ self.name = name
+ self.container = container
+ self.properties = None
+ self.size = None
+
+ self._clients = clients
+ self._config = config
+ self._start_range = start_range
+ self._end_range = end_range
+ self._max_concurrency = max_concurrency
+ self._encoding = encoding
+ self._validate_content = validate_content
+ self._encryption_options = encryption_options or {}
+ self._request_options = kwargs
+ self._location_mode = None
+ self._download_complete = False
+ self._current_content = None
+ self._file_size = None
+ self._non_empty_ranges = None
+ self._response = None
+
+ # The service only provides transactional MD5s for chunks under 4MB.
+ # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
+ # chunk so a transactional MD5 can be retrieved.
+ self._first_get_size = (
+ self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size
+ )
+ initial_request_start = self._start_range if self._start_range is not None else 0
+ if self._end_range is not None and self._end_range - self._start_range < self._first_get_size:
+ initial_request_end = self._end_range
+ else:
+ initial_request_end = initial_request_start + self._first_get_size - 1
+
+ self._initial_range, self._initial_offset = process_range_and_offset(
+ initial_request_start, initial_request_end, self._end_range, self._encryption_options
+ )
+
+ self._response = self._initial_request()
+ self.properties = self._response.properties
+ self.properties.name = self.name
+ self.properties.container = self.container
+
+ # Set the content length to the download size instead of the size of
+ # the last range
+ self.properties.size = self.size
+
+ # Overwrite the content range to the user requested range
+ self.properties.content_range = "bytes {0}-{1}/{2}".format(
+ self._start_range,
+ self._end_range,
+ self._file_size
+ )
+
+ # Overwrite the content MD5 as it is the MD5 for the last range instead
+ # of the stored MD5
+ # TODO: Set to the stored MD5 when the service returns this
+ self.properties.content_md5 = None
+
+ if self.size == 0:
+ self._current_content = b""
+ else:
+ self._current_content = process_content(
+ self._response,
+ self._initial_offset[0],
+ self._initial_offset[1],
+ self._encryption_options
+ )
+
+ def __len__(self):
+ return self.size
+
+ def _initial_request(self):
+ range_header, range_validation = validate_and_format_range_headers(
+ self._initial_range[0],
+ self._initial_range[1],
+ start_range_required=False,
+ end_range_required=False,
+ check_content_md5=self._validate_content
+ )
+
+ try:
+ location_mode, response = self._clients.blob.download(
+ range=range_header,
+ range_get_content_md5=range_validation,
+ validate_content=self._validate_content,
+ data_stream_total=None,
+ download_stream_current=0,
+ **self._request_options
+ )
+
+ # Check the location we read from to ensure we use the same one
+ # for subsequent requests.
+ self._location_mode = location_mode
+
+ # Parse the total file size and adjust the download size if ranges
+ # were specified
+ self._file_size = parse_length_from_content_range(response.properties.content_range)
+ if self._end_range is not None:
+ # Use the end range index unless it is over the end of the file
+ self.size = min(self._file_size, self._end_range - self._start_range + 1)
+ elif self._start_range is not None:
+ self.size = self._file_size - self._start_range
+ else:
+ self.size = self._file_size
+
+ except HttpResponseError as error:
+ if self._start_range is None and error.response.status_code == 416:
+ # Get range will fail on an empty file. If the user did not
+ # request a range, do a regular get request in order to get
+ # any properties.
+ try:
+ _, response = self._clients.blob.download(
+ validate_content=self._validate_content,
+ data_stream_total=0,
+ download_stream_current=0,
+ **self._request_options
+ )
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ # Set the download size to empty
+ self.size = 0
+ self._file_size = 0
+ else:
+ process_storage_error(error)
+
+ # get page ranges to optimize downloading sparse page blob
+ if response.properties.blob_type == 'PageBlob':
+ try:
+ page_ranges = self._clients.page_blob.get_page_ranges()
+ self._non_empty_ranges = get_page_ranges_result(page_ranges)[0]
+ # according to the REST API documentation:
+ # in a highly fragmented page blob with a large number of writes,
+ # a Get Page Ranges request can fail due to an internal server timeout.
+ # thus, if the page blob is not sparse, it's ok for it to fail
+ except HttpResponseError:
+ pass
+
+ # If the file is small, the download is complete at this point.
+ # If file size is large, download the rest of the file in chunks.
+ if response.properties.size != self.size:
+            # Lock on the etag. This can be overridden by the user by specifying '*'
+ if self._request_options.get("modified_access_conditions"):
+ if not self._request_options["modified_access_conditions"].if_match:
+ self._request_options["modified_access_conditions"].if_match = response.properties.etag
+ else:
+ self._download_complete = True
+ return response
+
+ def chunks(self):
+ if self.size == 0 or self._download_complete:
+ iter_downloader = None
+ else:
+ data_end = self._file_size
+ if self._end_range is not None:
+ # Use the end range index unless it is over the end of the file
+ data_end = min(self._file_size, self._end_range + 1)
+ iter_downloader = _ChunkDownloader(
+ client=self._clients.blob,
+ non_empty_ranges=self._non_empty_ranges,
+ total_size=self.size,
+ chunk_size=self._config.max_chunk_get_size,
+ current_progress=self._first_get_size,
+ start_range=self._initial_range[1] + 1, # start where the first download ended
+ end_range=data_end,
+ stream=None,
+ parallel=False,
+ validate_content=self._validate_content,
+ encryption_options=self._encryption_options,
+ use_location=self._location_mode,
+ **self._request_options
+ )
+ return _ChunkIterator(
+ size=self.size,
+ content=self._current_content,
+ downloader=iter_downloader)
+
+ def readall(self):
+ """Download the contents of this blob.
+
+ This operation is blocking until all data is downloaded.
+
+ :rtype: bytes or str
+ """
+ stream = BytesIO()
+ self.readinto(stream)
+ data = stream.getvalue()
+ if self._encoding:
+ return data.decode(self._encoding)
+ return data
+
+ def content_as_bytes(self, max_concurrency=1):
+ """Download the contents of this file.
+
+ This operation is blocking until all data is downloaded.
+
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :rtype: bytes
+ """
+ warnings.warn(
+ "content_as_bytes is deprecated, use readall instead",
+ DeprecationWarning
+ )
+ self._max_concurrency = max_concurrency
+ return self.readall()
+
+ def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+ """Download the contents of this blob, and decode as text.
+
+ This operation is blocking until all data is downloaded.
+
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :param str encoding:
+            Text encoding used to decode the downloaded bytes. Default is UTF-8.
+ :rtype: str
+ """
+ warnings.warn(
+ "content_as_text is deprecated, use readall instead",
+ DeprecationWarning
+ )
+ self._max_concurrency = max_concurrency
+ self._encoding = encoding
+ return self.readall()
+
+ def readinto(self, stream):
+ """Download the contents of this file to a stream.
+
+ :param stream:
+ The stream to download to. This can be an open file-handle,
+ or any writable stream. The stream must be seekable if the download
+ uses more than one parallel connection.
+ :returns: The number of bytes read.
+ :rtype: int
+ """
+ # The stream must be seekable if parallel download is required
+ parallel = self._max_concurrency > 1
+ if parallel:
+ error_message = "Target stream handle must be seekable."
+ if sys.version_info >= (3,) and not stream.seekable():
+ raise ValueError(error_message)
+
+ try:
+ stream.seek(stream.tell())
+ except (NotImplementedError, AttributeError):
+ raise ValueError(error_message)
+
+ # Write the content to the user stream
+ stream.write(self._current_content)
+ if self._download_complete:
+ return self.size
+
+ data_end = self._file_size
+ if self._end_range is not None:
+ # Use the length unless it is over the end of the file
+ data_end = min(self._file_size, self._end_range + 1)
+
+ downloader = _ChunkDownloader(
+ client=self._clients.blob,
+ non_empty_ranges=self._non_empty_ranges,
+ total_size=self.size,
+ chunk_size=self._config.max_chunk_get_size,
+ current_progress=self._first_get_size,
+ start_range=self._initial_range[1] + 1, # Start where the first download ended
+ end_range=data_end,
+ stream=stream,
+ parallel=parallel,
+ validate_content=self._validate_content,
+ encryption_options=self._encryption_options,
+ use_location=self._location_mode,
+ **self._request_options
+ )
+ if parallel:
+ import concurrent.futures
+ executor = concurrent.futures.ThreadPoolExecutor(self._max_concurrency)
+ list(executor.map(
+ with_current_context(downloader.process_chunk),
+ downloader.get_chunk_offsets()
+ ))
+ else:
+ for chunk in downloader.get_chunk_offsets():
+ downloader.process_chunk(chunk)
+ return self.size
+
+ def download_to_stream(self, stream, max_concurrency=1):
+ """Download the contents of this blob to a stream.
+
+ :param stream:
+ The stream to download to. This can be an open file-handle,
+ or any writable stream. The stream must be seekable if the download
+ uses more than one parallel connection.
+ :returns: The properties of the downloaded blob.
+ :rtype: Any
+ """
+ warnings.warn(
+ "download_to_stream is deprecated, use readinto instead",
+ DeprecationWarning
+ )
+ self._max_concurrency = max_concurrency
+ self.readinto(stream)
+ return self.properties
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/__init__.py
new file mode 100644
index 00000000000..f5c8f4a954d
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/__init__.py
@@ -0,0 +1,18 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._azure_blob_storage import AzureBlobStorage
+__all__ = ['AzureBlobStorage']
+
+from .version import VERSION
+
+__version__ = VERSION
+
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/_azure_blob_storage.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/_azure_blob_storage.py
new file mode 100644
index 00000000000..831f6ce2033
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/_azure_blob_storage.py
@@ -0,0 +1,83 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core import PipelineClient
+from msrest import Serializer, Deserializer
+
+from ._configuration import AzureBlobStorageConfiguration
+from azure.core.exceptions import map_error
+from .operations import ServiceOperations
+from .operations import ContainerOperations
+from .operations import DirectoryOperations
+from .operations import BlobOperations
+from .operations import PageBlobOperations
+from .operations import AppendBlobOperations
+from .operations import BlockBlobOperations
+from . import models
+
+
+class AzureBlobStorage(object):
+ """AzureBlobStorage
+
+
+ :ivar service: Service operations
+ :vartype service: azure.storage.blob.operations.ServiceOperations
+ :ivar container: Container operations
+ :vartype container: azure.storage.blob.operations.ContainerOperations
+ :ivar directory: Directory operations
+ :vartype directory: azure.storage.blob.operations.DirectoryOperations
+ :ivar blob: Blob operations
+ :vartype blob: azure.storage.blob.operations.BlobOperations
+ :ivar page_blob: PageBlob operations
+ :vartype page_blob: azure.storage.blob.operations.PageBlobOperations
+ :ivar append_blob: AppendBlob operations
+ :vartype append_blob: azure.storage.blob.operations.AppendBlobOperations
+ :ivar block_blob: BlockBlob operations
+ :vartype block_blob: azure.storage.blob.operations.BlockBlobOperations
+
+ :param url: The URL of the service account, container, or blob that is the
+        target of the desired operation.
+ :type url: str
+ """
+
+ def __init__(self, url, **kwargs):
+
+ base_url = '{url}'
+ self._config = AzureBlobStorageConfiguration(url, **kwargs)
+ self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+ client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+ self.api_version = '2020-02-10'
+ self._serialize = Serializer(client_models)
+ self._deserialize = Deserializer(client_models)
+
+ self.service = ServiceOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.container = ContainerOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.directory = DirectoryOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.blob = BlobOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.page_blob = PageBlobOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.append_blob = AppendBlobOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.block_blob = BlockBlobOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+
+    def close(self):
+        self._client.close()
+
+    def __enter__(self):
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *exc_details):
+        self._client.__exit__(*exc_details)
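+
+
+# Usage sketch (assumptions: an account URL carrying a SAS token; the wrapping
+# clients in this package normally construct this generated client for you):
+#     client = AzureBlobStorage("https://myaccount.blob.core.windows.net?<sas>")
+#     props = client.service.get_properties()
+#     client.close()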
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/_configuration.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/_configuration.py
new file mode 100644
index 00000000000..c8a1875b6af
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/_configuration.py
@@ -0,0 +1,52 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+
+from .version import VERSION
+
+
+class AzureBlobStorageConfiguration(Configuration):
+ """Configuration for AzureBlobStorage
+ Note that all parameters used to create this instance are saved as instance
+ attributes.
+
+ :param url: The URL of the service account, container, or blob that is the
+        target of the desired operation.
+ :type url: str
+ :ivar version: Specifies the version of the operation to use for this
+ request.
+ :type version: str
+ """
+
+ def __init__(self, url, **kwargs):
+
+ if url is None:
+ raise ValueError("Parameter 'url' must not be None.")
+
+ super(AzureBlobStorageConfiguration, self).__init__(**kwargs)
+ self._configure(**kwargs)
+
+ self.user_agent_policy.add_user_agent('azsdk-python-azureblobstorage/{}'.format(VERSION))
+ self.generate_client_request_id = True
+
+ self.url = url
+ self.version = "2020-02-10"
+
+ def _configure(self, **kwargs):
+ self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
+ self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
+ self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
+ self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
+ self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
+ self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
+ self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
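+
+
+# Usage sketch (illustrative): the per-policy kwargs read by _configure let a
+# caller swap in custom pipeline policies, e.g. a tighter retry policy:
+#     from azure.core.pipeline.policies import RetryPolicy
+#     config = AzureBlobStorageConfiguration(
+#         "https://myaccount.blob.core.windows.net",
+#         retry_policy=RetryPolicy(retry_total=3),
+#     )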
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/__init__.py
new file mode 100644
index 00000000000..009c9659435
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/__init__.py
@@ -0,0 +1,13 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._azure_blob_storage_async import AzureBlobStorage
+__all__ = ['AzureBlobStorage']
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/_azure_blob_storage_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/_azure_blob_storage_async.py
new file mode 100644
index 00000000000..367e296ea6f
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/_azure_blob_storage_async.py
@@ -0,0 +1,84 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core import AsyncPipelineClient
+from msrest import Serializer, Deserializer
+
+from ._configuration_async import AzureBlobStorageConfiguration
+from azure.core.exceptions import map_error
+from .operations_async import ServiceOperations
+from .operations_async import ContainerOperations
+from .operations_async import DirectoryOperations
+from .operations_async import BlobOperations
+from .operations_async import PageBlobOperations
+from .operations_async import AppendBlobOperations
+from .operations_async import BlockBlobOperations
+from .. import models
+
+
+class AzureBlobStorage(object):
+ """AzureBlobStorage
+
+
+ :ivar service: Service operations
+ :vartype service: azure.storage.blob.aio.operations_async.ServiceOperations
+ :ivar container: Container operations
+ :vartype container: azure.storage.blob.aio.operations_async.ContainerOperations
+ :ivar directory: Directory operations
+ :vartype directory: azure.storage.blob.aio.operations_async.DirectoryOperations
+ :ivar blob: Blob operations
+ :vartype blob: azure.storage.blob.aio.operations_async.BlobOperations
+ :ivar page_blob: PageBlob operations
+ :vartype page_blob: azure.storage.blob.aio.operations_async.PageBlobOperations
+ :ivar append_blob: AppendBlob operations
+ :vartype append_blob: azure.storage.blob.aio.operations_async.AppendBlobOperations
+ :ivar block_blob: BlockBlob operations
+ :vartype block_blob: azure.storage.blob.aio.operations_async.BlockBlobOperations
+
+ :param url: The URL of the service account, container, or blob that is the
+        target of the desired operation.
+ :type url: str
+ """
+
+ def __init__(
+ self, url, **kwargs):
+
+ base_url = '{url}'
+ self._config = AzureBlobStorageConfiguration(url, **kwargs)
+ self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+ client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+ self.api_version = '2020-02-10'
+ self._serialize = Serializer(client_models)
+ self._deserialize = Deserializer(client_models)
+
+ self.service = ServiceOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.container = ContainerOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.directory = DirectoryOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.blob = BlobOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.page_blob = PageBlobOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.append_blob = AppendBlobOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+ self.block_blob = BlockBlobOperations(
+ self._client, self._config, self._serialize, self._deserialize)
+
+    async def close(self):
+        await self._client.close()
+
+    async def __aenter__(self):
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *exc_details):
+        await self._client.__aexit__(*exc_details)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/_configuration_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/_configuration_async.py
new file mode 100644
index 00000000000..609cb82ac85
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/_configuration_async.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+
+from ..version import VERSION
+
+
+class AzureBlobStorageConfiguration(Configuration):
+ """Configuration for AzureBlobStorage
+ Note that all parameters used to create this instance are saved as instance
+ attributes.
+
+ :param url: The URL of the service account, container, or blob that is the
+        target of the desired operation.
+ :type url: str
+ :ivar version: Specifies the version of the operation to use for this
+ request.
+ :type version: str
+ """
+
+ def __init__(self, url, **kwargs):
+
+ if url is None:
+ raise ValueError("Parameter 'url' must not be None.")
+
+ super(AzureBlobStorageConfiguration, self).__init__(**kwargs)
+ self._configure(**kwargs)
+
+ self.user_agent_policy.add_user_agent('azsdk-python-azureblobstorage/{}'.format(VERSION))
+ self.generate_client_request_id = True
+ self.accept_language = None
+
+ self.url = url
+ self.version = "2020-02-10"
+
+ def _configure(self, **kwargs):
+ self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
+ self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
+ self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
+ self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
+ self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
+ self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
+ self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/__init__.py
new file mode 100644
index 00000000000..dec05192c81
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/__init__.py
@@ -0,0 +1,28 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._service_operations_async import ServiceOperations
+from ._container_operations_async import ContainerOperations
+from ._directory_operations_async import DirectoryOperations
+from ._blob_operations_async import BlobOperations
+from ._page_blob_operations_async import PageBlobOperations
+from ._append_blob_operations_async import AppendBlobOperations
+from ._block_blob_operations_async import BlockBlobOperations
+
+__all__ = [
+ 'ServiceOperations',
+ 'ContainerOperations',
+ 'DirectoryOperations',
+ 'BlobOperations',
+ 'PageBlobOperations',
+ 'AppendBlobOperations',
+ 'BlockBlobOperations',
+]
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_append_blob_operations_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_append_blob_operations_async.py
new file mode 100644
index 00000000000..ea79827cdbd
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_append_blob_operations_async.py
@@ -0,0 +1,694 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class AppendBlobOperations:
+ """AppendBlobOperations async operations.
+
+    You should not instantiate this class directly; instead, create a client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "AppendBlob".
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer) -> None:
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+ self.x_ms_blob_type = "AppendBlob"
+
+ async def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Create Append Blob operation creates a new append blob.
+
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param blob_tags_string: Optional. Used to set blob tags in various
+ blob operations.
+ :type blob_tags_string: str
+ :param blob_http_headers: Additional parameters for the operation
+ :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ blob_content_type = None
+ if blob_http_headers is not None:
+ blob_content_type = blob_http_headers.blob_content_type
+ blob_content_encoding = None
+ if blob_http_headers is not None:
+ blob_content_encoding = blob_http_headers.blob_content_encoding
+ blob_content_language = None
+ if blob_http_headers is not None:
+ blob_content_language = blob_http_headers.blob_content_language
+ blob_content_md5 = None
+ if blob_http_headers is not None:
+ blob_content_md5 = blob_http_headers.blob_content_md5
+ blob_cache_control = None
+ if blob_http_headers is not None:
+ blob_cache_control = blob_http_headers.blob_cache_control
+ blob_content_disposition = None
+ if blob_http_headers is not None:
+ blob_content_disposition = blob_http_headers.blob_content_disposition
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ # Construct URL
+ url = self.create.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if blob_tags_string is not None:
+ header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str')
+ header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str')
+ if blob_content_type is not None:
+ header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
+ if blob_content_encoding is not None:
+ header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
+ if blob_content_language is not None:
+ header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
+ if blob_content_md5 is not None:
+ header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
+ if blob_cache_control is not None:
+ header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
+ if blob_content_disposition is not None:
+ header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ create.metadata = {'url': '/{containerName}/{blob}'}
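+    # Usage sketch (illustrative; the AppendBlobClient wrapper normally drives
+    # these calls, and the account URL below is an assumption):
+    #     client = AzureBlobStorage("https://myaccount.blob.core.windows.net/container/blob?<sas>")
+    #     await client.append_blob.create(content_length=0)
+    #     await client.append_blob.append_block(body=chunks, content_length=size)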
+
+ async def append_block(self, body, content_length, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, lease_access_conditions=None, append_position_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Append Block operation commits a new block of data to the end of an
+ existing append blob. The Append Block operation is permitted only if
+ the blob was created with x-ms-blob-type set to AppendBlob. Append
+        Block is supported only on version 2015-02-21 or later.
+
+ :param body: Initial data
+ :type body: Generator
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param transactional_content_md5: Specify the transactional md5 for
+ the body, to be validated by the service.
+ :type transactional_content_md5: bytearray
+ :param transactional_content_crc64: Specify the transactional crc64
+ for the body, to be validated by the service.
+ :type transactional_content_crc64: bytearray
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param append_position_access_conditions: Additional parameters for
+ the operation
+ :type append_position_access_conditions:
+ ~azure.storage.blob.models.AppendPositionAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ max_size = None
+ if append_position_access_conditions is not None:
+ max_size = append_position_access_conditions.max_size
+ append_position = None
+ if append_position_access_conditions is not None:
+ append_position = append_position_access_conditions.append_position
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "appendblock"
+
+ # Construct URL
+ url = self.append_block.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/octet-stream'
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if transactional_content_md5 is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
+ if transactional_content_crc64 is not None:
+ header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if max_size is not None:
+ header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long')
+ if append_position is not None:
+ header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct body
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')),
+ 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ append_block.metadata = {'url': '/{containerName}/{blob}'}
+
+ async def append_block_from_url(self, source_url, content_length, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, transactional_content_md5=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, append_position_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Append Block operation commits a new block of data to the end of an
+ existing append blob where the contents are read from a source url. The
+ Append Block operation is permitted only if the blob was created with
+ x-ms-blob-type set to AppendBlob. Append Block is supported only on
+        version 2015-02-21 or later.
+
+ :param source_url: Specify a URL to the copy source.
+ :type source_url: str
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param source_range: Bytes of source data in the specified range.
+ :type source_range: str
+ :param source_content_md5: Specify the md5 calculated for the range of
+ bytes that must be read from the copy source.
+ :type source_content_md5: bytearray
+ :param source_contentcrc64: Specify the crc64 calculated for the range
+ of bytes that must be read from the copy source.
+ :type source_contentcrc64: bytearray
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param transactional_content_md5: Specify the transactional md5 for
+ the body, to be validated by the service.
+ :type transactional_content_md5: bytearray
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param append_position_access_conditions: Additional parameters for
+ the operation
+ :type append_position_access_conditions:
+ ~azure.storage.blob.models.AppendPositionAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param source_modified_access_conditions: Additional parameters for
+ the operation
+ :type source_modified_access_conditions:
+ ~azure.storage.blob.models.SourceModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ max_size = None
+ if append_position_access_conditions is not None:
+ max_size = append_position_access_conditions.max_size
+ append_position = None
+ if append_position_access_conditions is not None:
+ append_position = append_position_access_conditions.append_position
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+ source_if_modified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ source_if_unmodified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ source_if_match = None
+ if source_modified_access_conditions is not None:
+ source_if_match = source_modified_access_conditions.source_if_match
+ source_if_none_match = None
+ if source_modified_access_conditions is not None:
+ source_if_none_match = source_modified_access_conditions.source_if_none_match
+
+ comp = "appendblock"
+
+ # Construct URL
+ url = self.append_block_from_url.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str')
+ if source_range is not None:
+ header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str')
+ if source_content_md5 is not None:
+ header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
+ if source_contentcrc64 is not None:
+ header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray')
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if transactional_content_md5 is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if max_size is not None:
+ header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long')
+ if append_position is not None:
+ header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+ if source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
+ if source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
+ if source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
+ if source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')),
+ 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ append_block_from_url.metadata = {'url': '/{containerName}/{blob}'}
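+
+    # Hedged usage sketch (not part of the generated client). It shows how the
+    # operation above maps to the REST call; `ops` and its constructor
+    # arguments are assumptions for illustration only.
+    #
+    #     await ops.append_block_from_url(
+    #         source_url="https://source.blob.core.windows.net/c/blob?<sas>",
+    #         content_length=0,
+    #         source_range="bytes=0-1023")
+    #
+    # This issues PUT {blob-url}?comp=appendblock with x-ms-copy-source set to
+    # source_url and expects a 201 response.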
+
+ async def seal(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, append_position_access_conditions=None, *, cls=None, **kwargs):
+ """The Seal operation seals the Append Blob to make it read-only. Seal is
+ supported only on version 2019-12-12 or later.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param append_position_access_conditions: Additional parameters for
+ the operation
+ :type append_position_access_conditions:
+ ~azure.storage.blob.models.AppendPositionAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ append_position = None
+ if append_position_access_conditions is not None:
+ append_position = append_position_access_conditions.append_position
+
+ comp = "seal"
+
+ # Construct URL
+ url = self.seal.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if append_position is not None:
+ header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ seal.metadata = {'url': '/{containerName}/{blob}'}
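+
+    # Hedged usage sketch (illustration only; `ops` is an assumed instance of
+    # this operations class):
+    #
+    #     await ops.seal()
+    #
+    # This issues PUT {blob-url}?comp=seal and expects a 200 response with the
+    # x-ms-blob-sealed response header set to true.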
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_blob_operations_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_blob_operations_async.py
new file mode 100644
index 00000000000..54d6dab2a31
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_blob_operations_async.py
@@ -0,0 +1,3067 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class BlobOperations:
+ """BlobOperations async operations.
+
+ You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ :ivar x_ms_requires_sync: Constant value: "true".
+ :ivar x_ms_copy_action: Constant value: "abort".
+ :ivar restype: Constant value: "account".
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer) -> None:
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+ self.x_ms_requires_sync = "true"
+ self.x_ms_copy_action = "abort"
+ self.restype = "account"
+
+ async def download(self, snapshot=None, version_id=None, timeout=None, range=None, range_get_content_md5=None, range_get_content_crc64=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Download operation reads or downloads a blob from the system,
+ including its metadata and properties. You can also call Download to
+ read a snapshot.
+
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param version_id: The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to operate
+ on. It's for service version 2019-10-10 and newer.
+ :type version_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param range: Return only the bytes of the blob in the specified
+ range.
+ :type range: str
+ :param range_get_content_md5: When set to true and specified together
+ with the Range, the service returns the MD5 hash for the range, as
+ long as the range is less than or equal to 4 MB in size.
+ :type range_get_content_md5: bool
+ :param range_get_content_crc64: When set to true and specified
+ together with the Range, the service returns the CRC64 hash for the
+ range, as long as the range is less than or equal to 4 MB in size.
+ :type range_get_content_crc64: bool
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: object or the result of cls(response)
+ :rtype: Generator
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ # Construct URL
+ url = self.download.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if version_id is not None:
+ query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ if range is not None:
+ header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
+ if range_get_content_md5 is not None:
+ header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool')
+ if range_get_content_crc64 is not None:
+ header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 206]:
+ await response.load_body()
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = response.stream_download(self._client._pipeline)
+ header_dict = {
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
+ 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')),
+ 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
+ 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
+ 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
+ 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')),
+ 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
+ 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
+ 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+ 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
+ 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
+ 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
+ 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
+ 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
+ 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
+ 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')),
+ 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')),
+ 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')),
+ 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ if response.status_code == 206:
+ deserialized = response.stream_download(self._client._pipeline)
+ header_dict = {
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
+ 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')),
+ 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
+ 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
+ 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
+ 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')),
+ 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
+ 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
+ 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+ 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
+ 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
+ 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
+ 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
+ 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
+ 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
+ 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')),
+ 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')),
+ 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')),
+ 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ download.metadata = {'url': '/{containerName}/{blob}'}
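+
+    # Hedged usage sketch (illustration only; `ops` is an assumed BlobOperations
+    # instance, `buffer` an assumed bytearray, and the range value is arbitrary):
+    #
+    #     stream = await ops.download(range="bytes=0-1023", range_get_content_md5=True)
+    #     async for chunk in stream:
+    #         buffer.extend(chunk)
+    #
+    # The request is sent with stream=True, so the body is not buffered by the
+    # pipeline; a 200 is returned for full downloads and a 206 for ranged ones.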
+
+ async def get_properties(self, snapshot=None, version_id=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Get Properties operation returns all user-defined metadata,
+ standard HTTP properties, and system properties for the blob. It does
+ not return the content of the blob.
+
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param version_id: The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to operate
+ on. It's for service version 2019-10-10 and newer.
+ :type version_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ # Construct URL
+ url = self.get_properties.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if version_id is not None:
+ query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.head(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-creation-time': self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')),
+ 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
+ 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')),
+ 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')),
+ 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')),
+ 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
+ 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
+ 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+ 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
+ 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
+ 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
+ 'x-ms-incremental-copy': self._deserialize('bool', response.headers.get('x-ms-incremental-copy')),
+ 'x-ms-copy-destination-snapshot': self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')),
+ 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
+ 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
+ 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
+ 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
+ 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
+ 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
+ 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
+ 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-access-tier': self._deserialize('str', response.headers.get('x-ms-access-tier')),
+ 'x-ms-access-tier-inferred': self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')),
+ 'x-ms-archive-status': self._deserialize('str', response.headers.get('x-ms-archive-status')),
+ 'x-ms-access-tier-change-time': self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'x-ms-is-current-version': self._deserialize('bool', response.headers.get('x-ms-is-current-version')),
+ 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')),
+ 'x-ms-expiry-time': self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')),
+ 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')),
+ 'x-ms-rehydrate-priority': self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')),
+ 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ get_properties.metadata = {'url': '/{containerName}/{blob}'}
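+
+    # Hedged usage sketch (illustration only). get_properties returns None unless
+    # a `cls` callback is supplied; the lambda below simply surfaces the parsed
+    # response headers:
+    #
+    #     headers = await ops.get_properties(
+    #         cls=lambda response, body, headers: headers)
+    #     etag, tier = headers['ETag'], headers['x-ms-access-tier']
+    #
+    # The request is a HEAD against the blob URL and expects a 200 response.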
+
+ async def delete(self, snapshot=None, version_id=None, timeout=None, delete_snapshots=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """If the storage account's soft delete feature is disabled then, when a
+ blob is deleted, it is permanently removed from the storage account. If
+ the storage account's soft delete feature is enabled, then, when a blob
+ is deleted, it is marked for deletion and becomes inaccessible
+ immediately. However, the blob service retains the blob or snapshot for
+ the number of days specified by the DeleteRetentionPolicy section of
+ [Storage service properties](Set-Blob-Service-Properties.md). After
+ the specified number of days has passed, the blob's data is permanently
+ removed from the storage account. Note that you continue to be charged
+ for the soft-deleted blob's storage until it is permanently removed.
+ Use the List Blobs API and specify the "include=deleted" query
+ parameter to discover which blobs and snapshots have been soft deleted.
+ You can then use the Undelete Blob API to restore a soft-deleted blob.
+ All other operations on a soft-deleted blob or snapshot cause the
+ service to return an HTTP status code of 404 (ResourceNotFound).
+
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param version_id: The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to operate
+ on. It's for service version 2019-10-10 and newer.
+ :type version_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param delete_snapshots: Required if the blob has associated
+ snapshots. Specify one of the following two options: include: Delete
+ the base blob and all of its snapshots. only: Delete only the blob's
+ snapshots and not the blob itself. Possible values include: 'include',
+ 'only'
+ :type delete_snapshots: str or
+ ~azure.storage.blob.models.DeleteSnapshotsOptionType
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ # Construct URL
+ url = self.delete.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if version_id is not None:
+ query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ if delete_snapshots is not None:
+ header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.delete(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ delete.metadata = {'url': '/{containerName}/{blob}'}
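+
+    # Hedged usage sketch (illustration only; `ops` is an assumed BlobOperations
+    # instance):
+    #
+    #     await ops.delete(delete_snapshots="include")
+    #
+    # This issues DELETE against the blob URL (optionally scoped by the snapshot
+    # or versionid query parameter) and expects a 202 response; with soft delete
+    # enabled the blob is retained for the configured retention period.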
+
+ async def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Set the owner, group, permissions, or access control list for a blob.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param owner: Optional. The owner of the blob or directory.
+ :type owner: str
+ :param group: Optional. The owning group of the blob or directory.
+ :type group: str
+ :param posix_permissions: Optional and only valid if Hierarchical
+ Namespace is enabled for the account. Sets POSIX access permissions
+ for the file owner, the file owning group, and others. Each class may
+ be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
+ 0766) are supported.
+ :type posix_permissions: str
+ :param posix_acl: Sets POSIX access control rights on files and
+ directories. The value is a comma-separated list of access control
+ entries. Each access control entry (ACE) consists of a scope, a type,
+ a user or group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type posix_acl: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`DataLakeStorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ action = "setAccessControl"
+
+ # Construct URL
+ url = self.set_access_control.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if owner is not None:
+ header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
+ if group is not None:
+ header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
+ if posix_permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
+ if posix_acl is not None:
+ header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.patch(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ }
+ return cls(response, None, response_headers)
+ set_access_control.metadata = {'url': '/{filesystem}/{path}'}
+
+ async def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Get the owner, group, permissions, or access control list for a blob.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param upn: Optional. Valid only when Hierarchical Namespace is
+ enabled for the account. If "true", the identity values returned in
+ the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+ transformed from Azure Active Directory Object IDs to User Principal
+ Names. If "false", the values will be returned as Azure Active
+ Directory Object IDs. The default value is false.
+ :type upn: bool
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`DataLakeStorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ action = "getAccessControl"
+
+ # Construct URL
+ url = self.get_access_control.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if upn is not None:
+ query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.head(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')),
+ 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')),
+ 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')),
+ 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ }
+ return cls(response, None, response_headers)
+ get_access_control.metadata = {'url': '/{filesystem}/{path}'}
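+
+ # Usage sketch (hypothetical, not part of the generated client): assuming an
+ # initialized instance `ops` of this operations class, the ACL of a path can
+ # be read through the `cls` callback, which receives the parsed response headers:
+ #   headers = await ops.get_access_control(upn=True, cls=lambda resp, _, hdrs: hdrs)
+ #   acl = headers['x-ms-acl']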
+
+ async def rename(self, rename_source, timeout=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
+ """Rename a blob/file. By default, the destination is overwritten and if
+ the destination already exists and has a lease the lease is broken.
+ This operation supports conditional HTTP requests. For more
+ information, see [Specifying Conditional Headers for Blob Service
+ Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+ To fail if the destination already exists, use a conditional request
+ with If-None-Match: "*".
+
+ :param rename_source: The file or directory to be renamed. The value
+ must have the following format: "/{filesystem}/{path}". If
+ "x-ms-properties" is specified, the properties will overwrite the
+ existing properties; otherwise, the existing properties will be
+ preserved.
+ :type rename_source: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param path_rename_mode: Determines the behavior of the rename
+ operation. Possible values include: 'legacy', 'posix'
+ :type path_rename_mode: str or
+ ~azure.storage.blob.models.PathRenameMode
+ :param directory_properties: Optional. User-defined properties to be
+ stored with the file or directory, in the format of a comma-separated
+ list of name and value pairs "n1=v1, n2=v2, ...", where each value is
+ base64 encoded.
+ :type directory_properties: str
+ :param posix_permissions: Optional and only valid if Hierarchical
+ Namespace is enabled for the account. Sets POSIX access permissions
+ for the file owner, the file owning group, and others. Each class may
+ be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
+ 0766) are supported.
+ :type posix_permissions: str
+ :param posix_umask: Only valid if Hierarchical Namespace is enabled
+ for the account. This umask restricts the permission settings for the
+ file and directory, and is only applied when a default ACL does not
+ exist in the parent directory. If a bit is set in the umask, the
+ corresponding permission is disabled; otherwise it is determined by the
+ permissions given in posix_permissions. A 4-digit octal notation (e.g.
+ 0022) is supported here. If no umask is specified, a default umask of
+ 0027 is used.
+ :type posix_umask: str
+ :param source_lease_id: A lease ID for the source path. If specified,
+ the source path must have an active lease and the lease ID must match.
+ :type source_lease_id: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param directory_http_headers: Additional parameters for the operation
+ :type directory_http_headers:
+ ~azure.storage.blob.models.DirectoryHttpHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param source_modified_access_conditions: Additional parameters for
+ the operation
+ :type source_modified_access_conditions:
+ ~azure.storage.blob.models.SourceModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`DataLakeStorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ cache_control = None
+ if directory_http_headers is not None:
+ cache_control = directory_http_headers.cache_control
+ content_type = None
+ if directory_http_headers is not None:
+ content_type = directory_http_headers.content_type
+ content_encoding = None
+ if directory_http_headers is not None:
+ content_encoding = directory_http_headers.content_encoding
+ content_language = None
+ if directory_http_headers is not None:
+ content_language = directory_http_headers.content_language
+ content_disposition = None
+ if directory_http_headers is not None:
+ content_disposition = directory_http_headers.content_disposition
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ source_if_modified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ source_if_unmodified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ source_if_match = None
+ if source_modified_access_conditions is not None:
+ source_if_match = source_modified_access_conditions.source_if_match
+ source_if_none_match = None
+ if source_modified_access_conditions is not None:
+ source_if_none_match = source_modified_access_conditions.source_if_none_match
+
+ # Construct URL
+ url = self.rename.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if path_rename_mode is not None:
+ query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str')
+ if directory_properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str')
+ if posix_permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
+ if posix_umask is not None:
+ header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str')
+ if source_lease_id is not None:
+ header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if cache_control is not None:
+ header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
+ if content_type is not None:
+ header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
+ if content_encoding is not None:
+ header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
+ if content_language is not None:
+ header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
+ if content_disposition is not None:
+ header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
+ if source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
+ if source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
+ if source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ }
+ return cls(response, None, response_headers)
+ rename.metadata = {'url': '/{filesystem}/{path}'}
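+
+ # Usage sketch (hypothetical, keyword-style model constructor assumed): with an
+ # instance `ops` of this operations class, a rename that fails if the destination
+ # already exists can pass If-None-Match: "*" via ModifiedAccessConditions, as
+ # described in the docstring above:
+ #   conditions = models.ModifiedAccessConditions(if_none_match='*')
+ #   await ops.rename('/myfilesystem/source.txt',
+ #                    modified_access_conditions=conditions)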
+
+ async def undelete(self, timeout=None, request_id=None, *, cls=None, **kwargs):
+ """Undelete a blob that was previously soft deleted.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "undelete"
+
+ # Construct URL
+ url = self.undelete.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ undelete.metadata = {'url': '/{containerName}/{blob}'}
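+
+ # Usage sketch (hypothetical): restoring a soft-deleted blob takes no required
+ # arguments beyond the blob path already baked into the client configuration:
+ #   await ops.undelete()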
+
+ async def set_expiry(self, expiry_options, timeout=None, request_id=None, expires_on=None, *, cls=None, **kwargs):
+ """Sets the time a blob will expire and be deleted.
+
+ :param expiry_options: Required. Indicates mode of the expiry time.
+ Possible values include: 'NeverExpire', 'RelativeToCreation',
+ 'RelativeToNow', 'Absolute'
+ :type expiry_options: str or
+ ~azure.storage.blob.models.BlobExpiryOptions
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param expires_on: The time at which the blob is set to expire
+ :type expires_on: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "expiry"
+
+ # Construct URL
+ url = self.set_expiry.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str')
+ if expires_on is not None:
+ header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_expiry.metadata = {'url': '/{containerName}/{blob}'}
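+
+ # Usage sketch (hypothetical): clearing any scheduled expiry uses the
+ # 'NeverExpire' option, which does not require an expires_on value:
+ #   await ops.set_expiry('NeverExpire')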
+
+ async def set_http_headers(self, timeout=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Set HTTP Headers operation sets system properties on the blob.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param blob_http_headers: Additional parameters for the operation
+ :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ blob_cache_control = None
+ if blob_http_headers is not None:
+ blob_cache_control = blob_http_headers.blob_cache_control
+ blob_content_type = None
+ if blob_http_headers is not None:
+ blob_content_type = blob_http_headers.blob_content_type
+ blob_content_md5 = None
+ if blob_http_headers is not None:
+ blob_content_md5 = blob_http_headers.blob_content_md5
+ blob_content_encoding = None
+ if blob_http_headers is not None:
+ blob_content_encoding = blob_http_headers.blob_content_encoding
+ blob_content_language = None
+ if blob_http_headers is not None:
+ blob_content_language = blob_http_headers.blob_content_language
+ blob_content_disposition = None
+ if blob_http_headers is not None:
+ blob_content_disposition = blob_http_headers.blob_content_disposition
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "properties"
+
+ # Construct URL
+ url = self.set_http_headers.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if blob_cache_control is not None:
+ header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
+ if blob_content_type is not None:
+ header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
+ if blob_content_md5 is not None:
+ header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
+ if blob_content_encoding is not None:
+ header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
+ if blob_content_language is not None:
+ header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
+ if blob_content_disposition is not None:
+ header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_http_headers.metadata = {'url': '/{containerName}/{blob}'}
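+
+ # Usage sketch (hypothetical, keyword-style model constructor assumed): system
+ # properties such as the Content-Type are passed through BlobHTTPHeaders:
+ #   http_headers = models.BlobHTTPHeaders(blob_content_type='text/plain')
+ #   await ops.set_http_headers(blob_http_headers=http_headers)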
+
+ async def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Set Blob Metadata operation sets user-defined metadata for the
+ specified blob as one or more name-value pairs.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation removes all existing metadata from the blob. If one or more
+ name-value pairs are specified, the blob's metadata is replaced with
+ the specified metadata. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "metadata"
+
+ # Construct URL
+ url = self.set_metadata.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_metadata.metadata = {'url': '/{containerName}/{blob}'}
+
+ async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """[Update] The Lease Blob operation establishes and manages a lock on a
+ blob for write and delete operations.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param duration: Specifies the duration of the lease, in seconds, or
+ negative one (-1) for a lease that never expires. A non-infinite lease
+ can be between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change.
+ :type duration: int
+ :param proposed_lease_id: Proposed lease ID, in a GUID string format.
+ The Blob service returns 400 (Invalid request) if the proposed lease
+ ID is not in the correct format. See Guid Constructor (String) for a
+ list of valid GUID string formats.
+ :type proposed_lease_id: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "lease"
+ action = "acquire"
+
+ # Construct URL
+ url = self.acquire_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if duration is not None:
+ header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int')
+ if proposed_lease_id is not None:
+ header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ acquire_lease.metadata = {'url': '/{containerName}/{blob}'}
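+
+ # Usage sketch (hypothetical): acquiring an infinite lease (-1) and reading the
+ # returned lease ID from the response headers via the `cls` callback:
+ #   hdrs = await ops.acquire_lease(duration=-1, cls=lambda resp, _, h: h)
+ #   lease_id = hdrs['x-ms-lease-id']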
+
+ async def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """[Update] The Lease Blob operation establishes and manages a lock on a
+ blob for write and delete operations.
+
+ :param lease_id: Specifies the current lease ID on the resource.
+ :type lease_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "lease"
+ action = "release"
+
+ # Construct URL
+ url = self.release_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ release_lease.metadata = {'url': '/{containerName}/{blob}'}
+
+ async def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """[Update] The Lease Blob operation establishes and manages a lock on a
+ blob for write and delete operations.
+
+ :param lease_id: Specifies the current lease ID on the resource.
+ :type lease_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "lease"
+ action = "renew"
+
+ # Construct URL
+ url = self.renew_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ renew_lease.metadata = {'url': '/{containerName}/{blob}'}
+
+ async def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """[Update] The Lease Blob operation establishes and manages a lock on a
+ blob for write and delete operations.
+
+ :param lease_id: Specifies the current lease ID on the resource.
+ :type lease_id: str
+ :param proposed_lease_id: Proposed lease ID, in a GUID string format.
+ The Blob service returns 400 (Invalid request) if the proposed lease
+ ID is not in the correct format. See Guid Constructor (String) for a
+ list of valid GUID string formats.
+ :type proposed_lease_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "lease"
+ action = "change"
+
+ # Construct URL
+ url = self.change_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ change_lease.metadata = {'url': '/{containerName}/{blob}'}
+
+ async def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """[Update] The Lease Blob operation establishes and manages a lock on a
+ blob for write and delete operations.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param break_period: For a break operation, proposed duration the
+ lease should continue before it is broken, in seconds, between 0 and
+ 60. This break period is only used if it is shorter than the time
+ remaining on the lease. If longer, the time remaining on the lease is
+ used. A new lease will not be available before the break period has
+ expired, but the lease may be held for longer than the break period.
+ If this header does not appear with a break operation, a
+ fixed-duration lease breaks after the remaining lease period elapses,
+ and an infinite lease breaks immediately.
+ :type break_period: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "lease"
+ action = "break"
+
+ # Construct URL
+ url = self.break_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if break_period is not None:
+ header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ break_lease.metadata = {'url': '/{containerName}/{blob}'}
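+
+ # Usage sketch (hypothetical): a break_period of 0 breaks the lease immediately,
+ # so the blob is available for a new acquire as soon as the call returns:
+ #   await ops.break_lease(break_period=0)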
+
+ async def create_snapshot(self, timeout=None, metadata=None, request_id=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs):
+ """The Create Snapshot operation creates a read-only snapshot of a blob.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+
+ comp = "snapshot"
+
+ # Construct URL
+ url = self.create_snapshot.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ create_snapshot.metadata = {'url': '/{containerName}/{blob}'}
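A hedged usage sketch of `create_snapshot` as generated above. `blob_op` is again an assumed, pre-configured `BlobOperations` instance; note that in this vendored version `metadata` is serialized as a plain string into the `x-ms-meta` header.

```python
# Minimal sketch, assuming `blob_op` is a wired-up BlobOperations instance.
async def snapshot_example(blob_op):
    headers = await blob_op.create_snapshot(
        metadata="env=test",                 # sent as the x-ms-meta header
        cls=lambda resp, body, hdrs: hdrs,   # return the response headers dict
    )
    return headers['x-ms-snapshot']          # opaque value identifying the new snapshot
```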
+
+ async def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, blob_tags_string=None, seal_blob=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs):
+ """The Start Copy From URL operation copies a blob or an internet resource
+ to a new blob.
+
+ :param copy_source: Specifies the name of the source page blob
+ snapshot. This value is a URL of up to 2 KB in length that specifies a
+ page blob snapshot. The value should be URL-encoded as it would appear
+ in a request URI. The source blob must either be public or must be
+ authenticated via a shared access signature.
+ :type copy_source: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param tier: Optional. Indicates the tier to be set on the blob.
+ Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
+ 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
+ :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+ :param rehydrate_priority: Optional: Indicates the priority with which
+ to rehydrate an archived blob. Possible values include: 'High',
+ 'Standard'
+ :type rehydrate_priority: str or
+ ~azure.storage.blob.models.RehydratePriority
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param blob_tags_string: Optional. Used to set blob tags in various
+ blob operations.
+ :type blob_tags_string: str
+ :param seal_blob: Overrides the sealed state of the destination blob.
+ Service version 2019-12-12 and newer.
+ :type seal_blob: bool
+ :param source_modified_access_conditions: Additional parameters for
+ the operation
+ :type source_modified_access_conditions:
+ ~azure.storage.blob.models.SourceModifiedAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ source_if_modified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ source_if_unmodified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ source_if_match = None
+ if source_modified_access_conditions is not None:
+ source_if_match = source_modified_access_conditions.source_if_match
+ source_if_none_match = None
+ if source_modified_access_conditions is not None:
+ source_if_none_match = source_modified_access_conditions.source_if_none_match
+ source_if_tags = None
+ if source_modified_access_conditions is not None:
+ source_if_tags = source_modified_access_conditions.source_if_tags
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+
+ # Construct URL
+ url = self.start_copy_from_url.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ if tier is not None:
+ header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
+ if rehydrate_priority is not None:
+ header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str')
+ header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if blob_tags_string is not None:
+ header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str')
+ if seal_blob is not None:
+ header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool')
+ if source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
+ if source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
+ if source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
+ if source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
+ if source_if_tags is not None:
+ header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", source_if_tags, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+ 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'}
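A sketch of starting an asynchronous copy with the operation above. The target blob is determined by `self._config.url`, so only the source URL is passed per call; `blob_op` and `source_url` are assumptions, not names defined in this diff.

```python
# Hedged sketch: kick off a server-side copy and capture its tracking headers.
async def start_copy_example(blob_op, source_url):
    headers = await blob_op.start_copy_from_url(
        copy_source=source_url,              # x-ms-copy-source: public or SAS-authenticated URL
        cls=lambda resp, body, hdrs: hdrs,
    )
    # The copy runs asynchronously; keep the id to poll or abort it later.
    return headers['x-ms-copy-id'], headers['x-ms-copy-status']
```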
+
+ async def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, blob_tags_string=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs):
+ """The Copy From URL operation copies a blob or an internet resource to a
+ new blob. It will not return a response until the copy is complete.
+
+ :param copy_source: Specifies the name of the source page blob
+ snapshot. This value is a URL of up to 2 KB in length that specifies a
+ page blob snapshot. The value should be URL-encoded as it would appear
+ in a request URI. The source blob must either be public or must be
+ authenticated via a shared access signature.
+ :type copy_source: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param tier: Optional. Indicates the tier to be set on the blob.
+ Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
+ 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
+ :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param source_content_md5: Specify the md5 calculated for the range of
+ bytes that must be read from the copy source.
+ :type source_content_md5: bytearray
+ :param blob_tags_string: Optional. Used to set blob tags in various
+ blob operations.
+ :type blob_tags_string: str
+ :param source_modified_access_conditions: Additional parameters for
+ the operation
+ :type source_modified_access_conditions:
+ ~azure.storage.blob.models.SourceModifiedAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ source_if_modified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ source_if_unmodified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ source_if_match = None
+ if source_modified_access_conditions is not None:
+ source_if_match = source_modified_access_conditions.source_if_match
+ source_if_none_match = None
+ if source_modified_access_conditions is not None:
+ source_if_none_match = source_modified_access_conditions.source_if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+
+ # Construct URL
+ url = self.copy_from_url.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ if tier is not None:
+ header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
+ header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if source_content_md5 is not None:
+ header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
+ if blob_tags_string is not None:
+ header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str')
+ header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str')
+ if source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
+ if source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
+ if source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
+ if source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+ 'x-ms-copy-status': self._deserialize(models.SyncCopyStatusType, response.headers.get('x-ms-copy-status')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ copy_from_url.metadata = {'url': '/{containerName}/{blob}'}
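For contrast with the asynchronous variant, a sketch of the synchronous `copy_from_url` call above, which does not return until the copy has completed (`blob_op` and `source_url` are assumed as before).

```python
# Hedged sketch of a synchronous (x-ms-requires-sync) copy.
async def sync_copy_example(blob_op, source_url):
    headers = await blob_op.copy_from_url(
        copy_source=source_url,              # x-ms-copy-source; URL of up to 2 KB
        cls=lambda resp, body, hdrs: hdrs,
    )
    return headers['x-ms-copy-status']       # status of the completed synchronous copy
```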
+
+ async def abort_copy_from_url(self, copy_id, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs):
+ """The Abort Copy From URL operation aborts a pending Copy From URL
+ operation, and leaves a destination blob with zero length and full
+ metadata.
+
+ :param copy_id: The copy identifier provided in the x-ms-copy-id
+ header of the original Copy Blob operation.
+ :type copy_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+
+ comp = "copy"
+
+ # Construct URL
+ url = self.abort_copy_from_url.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'}
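A small sketch of aborting a pending copy using the operation above; `copy_id` is the `x-ms-copy-id` value returned by a previous Start Copy From URL call, and `blob_op` is an assumed instance.

```python
# Hedged sketch: abort a pending copy identified by its copy id.
async def abort_copy_example(blob_op, copy_id):
    await blob_op.abort_copy_from_url(copy_id=copy_id)   # 204 No Content on success
```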
+
+ async def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Set Tier operation sets the tier on a blob. The operation is
+ allowed on a page blob in a premium storage account and on a block blob
+ in a blob storage account (locally redundant storage only). A premium
+ page blob's tier determines the allowed size, IOPS, and bandwidth of
+ the blob. A block blob's tier determines Hot/Cool/Archive storage type.
+ This operation does not update the blob's ETag.
+
+ :param tier: Indicates the tier to be set on the blob. Possible values
+ include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', 'P40', 'P50', 'P60',
+ 'P70', 'P80', 'Hot', 'Cool', 'Archive'
+ :type tier: str or ~azure.storage.blob.models.AccessTierRequired
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param version_id: The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to operate
+ on. It's for service version 2019-10-10 and newer.
+ :type version_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param rehydrate_priority: Optional: Indicates the priority with which
+ to rehydrate an archived blob. Possible values include: 'High',
+ 'Standard'
+ :type rehydrate_priority: str or
+ ~azure.storage.blob.models.RehydratePriority
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "tier"
+
+ # Construct URL
+ url = self.set_tier.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if version_id is not None:
+ query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
+ if rehydrate_priority is not None:
+ header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_tier.metadata = {'url': '/{containerName}/{blob}'}
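A sketch of changing the access tier with the operation above; the service may answer 200 (immediate) or 202 (accepted), both of which the generated code treats as success. `blob_op` is an assumed instance.

```python
# Hedged sketch: move the blob to the Cool tier.
async def set_tier_example(blob_op):
    await blob_op.set_tier(tier="Cool")      # sent as the x-ms-access-tier header
```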
+
+ async def get_account_info(self, *, cls=None, **kwargs):
+        """Returns the sku name and account kind.
+
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "properties"
+
+ # Construct URL
+ url = self.get_account_info.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')),
+ 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ get_account_info.metadata = {'url': '/{containerName}/{blob}'}
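A sketch of reading the account information surfaced by the operation above; the SKU and account kind come back only as response headers, so the `cls` callback is used to expose them. `blob_op` is an assumed instance.

```python
# Hedged sketch: return the account SKU and kind from the response headers.
async def account_info_example(blob_op):
    info = await blob_op.get_account_info(cls=lambda resp, body, hdrs: hdrs)
    return info['x-ms-sku-name'], info['x-ms-account-kind']
```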
+
+ async def query(self, query_request=None, snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Query operation enables users to select/project on blob data by
+ providing simple query expressions.
+
+ :param query_request: the query request
+ :type query_request: ~azure.storage.blob.models.QueryRequest
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: object or the result of cls(response)
+ :rtype: Generator
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "query"
+
+ # Construct URL
+ url = self.query.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct body
+ if query_request is not None:
+ body_content = self._serialize.body(query_request, 'QueryRequest')
+ else:
+ body_content = None
+
+ # Construct and send request
+ request = self._client.post(url, query_parameters, header_parameters, body_content)
+ pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 206]:
+ await response.load_body()
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = response.stream_download(self._client._pipeline)
+ header_dict = {
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
+ 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
+ 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
+ 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')),
+ 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
+ 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
+ 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+ 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
+ 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
+ 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
+ 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
+ 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
+ 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
+ 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ if response.status_code == 206:
+ deserialized = response.stream_download(self._client._pipeline)
+ header_dict = {
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
+ 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
+ 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
+ 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')),
+ 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
+ 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
+ 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+ 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
+ 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
+ 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
+ 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
+ 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
+ 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
+ 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ query.metadata = {'url': '/{containerName}/{blob}'}
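A sketch of consuming the streamed results of the `query` operation above. Building the `models.QueryRequest` body is out of scope here, and `blob_op` is an assumed instance; the result is the stream returned by `response.stream_download` in the generated code.

```python
# Hedged sketch: run a blob query and drain the streamed response.
async def query_example(blob_op, query_request):
    downloader = await blob_op.query(query_request=query_request)
    content = b""
    async for chunk in downloader:           # streamed query results
        content += chunk
    return content
```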
+
+ async def get_tags(self, timeout=None, request_id=None, snapshot=None, version_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Get Tags operation enables users to get the tags associated with a
+ blob.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param version_id: The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to operate
+ on. It's for service version 2019-10-10 and newer.
+ :type version_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: BlobTags or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.BlobTags
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "tags"
+
+ # Construct URL
+ url = self.get_tags.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if version_id is not None:
+ query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('BlobTags', response)
+ header_dict = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_tags.metadata = {'url': '/{containerName}/{blob}'}
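A short sketch of reading blob tags with the operation above; the response body is deserialized into a `models.BlobTags` object. `blob_op` is an assumed instance.

```python
# Hedged sketch: fetch the tags of the blob addressed by the client configuration.
async def get_tags_example(blob_op):
    tags = await blob_op.get_tags()          # deserialized models.BlobTags
    return tags
```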
+
+ async def set_tags(self, timeout=None, version_id=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, tags=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Set Tags operation enables users to set tags on a blob.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param version_id: The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to operate
+ on. It's for service version 2019-10-10 and newer.
+ :type version_id: str
+ :param transactional_content_md5: Specify the transactional md5 for
+ the body, to be validated by the service.
+ :type transactional_content_md5: bytearray
+ :param transactional_content_crc64: Specify the transactional crc64
+ for the body, to be validated by the service.
+ :type transactional_content_crc64: bytearray
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param tags: Blob tags
+ :type tags: ~azure.storage.blob.models.BlobTags
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "tags"
+
+ # Construct URL
+ url = self.set_tags.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if version_id is not None:
+ query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if transactional_content_md5 is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
+ if transactional_content_crc64 is not None:
+ header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct body
+ if tags is not None:
+ body_content = self._serialize.body(tags, 'BlobTags')
+ else:
+ body_content = None
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, body_content)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_tags.metadata = {'url': '/{containerName}/{blob}'}
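A companion sketch for `set_tags`: the tags object is serialized as the XML request body and the service replies 204 on success. Constructing the `models.BlobTags` instance is not shown in this diff, and `blob_op` is an assumed instance.

```python
# Hedged sketch: write a prepared models.BlobTags object back to the blob.
async def set_tags_example(blob_op, tags):
    await blob_op.set_tags(tags=tags)        # XML body; 204 No Content on success
```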
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_block_blob_operations_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_block_blob_operations_async.py
new file mode 100644
index 00000000000..e06937056b0
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_block_blob_operations_async.py
@@ -0,0 +1,833 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class BlockBlobOperations:
+ """BlockBlobOperations async operations.
+
+    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "BlockBlob".
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer) -> None:
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+ self.x_ms_blob_type = "BlockBlob"
+
+ async def upload(self, body, content_length, timeout=None, transactional_content_md5=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Upload Block Blob operation updates the content of an existing
+ block blob. Updating an existing block blob overwrites any existing
+ metadata on the blob. Partial updates are not supported with Put Blob;
+ the content of the existing blob is overwritten with the content of the
+ new blob. To perform a partial update of the content of a block blob,
+ use the Put Block List operation.
+
+ :param body: Initial data
+ :type body: Generator
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param transactional_content_md5: Specify the transactional md5 for
+ the body, to be validated by the service.
+ :type transactional_content_md5: bytearray
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param tier: Optional. Indicates the tier to be set on the blob.
+ Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
+ 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
+ :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param blob_tags_string: Optional. Used to set blob tags in various
+ blob operations.
+ :type blob_tags_string: str
+ :param blob_http_headers: Additional parameters for the operation
+ :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ blob_content_type = None
+ if blob_http_headers is not None:
+ blob_content_type = blob_http_headers.blob_content_type
+ blob_content_encoding = None
+ if blob_http_headers is not None:
+ blob_content_encoding = blob_http_headers.blob_content_encoding
+ blob_content_language = None
+ if blob_http_headers is not None:
+ blob_content_language = blob_http_headers.blob_content_language
+ blob_content_md5 = None
+ if blob_http_headers is not None:
+ blob_content_md5 = blob_http_headers.blob_content_md5
+ blob_cache_control = None
+ if blob_http_headers is not None:
+ blob_cache_control = blob_http_headers.blob_cache_control
+ blob_content_disposition = None
+ if blob_http_headers is not None:
+ blob_content_disposition = blob_http_headers.blob_content_disposition
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ # Construct URL
+ url = self.upload.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/octet-stream'
+ if transactional_content_md5 is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ if tier is not None:
+ header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if blob_tags_string is not None:
+ header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str')
+ header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str')
+ if blob_content_type is not None:
+ header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
+ if blob_content_encoding is not None:
+ header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
+ if blob_content_language is not None:
+ header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
+ if blob_content_md5 is not None:
+ header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
+ if blob_cache_control is not None:
+ header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
+ if blob_content_disposition is not None:
+ header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct body
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ upload.metadata = {'url': '/{containerName}/{blob}'}
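+
+    # Editor's note: a minimal usage sketch, not part of the generated client.
+    # The single-shot Put Blob path described above is assumed to be reached via
+    # ``upload_blob`` on the public ``azure.storage.blob.aio`` BlobClient; the
+    # connection string and names are placeholders.
+    #
+    #   from azure.storage.blob.aio import BlobClient
+    #
+    #   async def upload_example():
+    #       async with BlobClient.from_connection_string(
+    #               "<connection-string>", container_name="mycontainer",
+    #               blob_name="test.txt") as blob:
+    #           # Overwrites any existing content and metadata on the blob.
+    #           await blob.upload_blob(b"hello, world", overwrite=True)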
+
+ async def stage_block(self, block_id, content_length, body, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, *, cls=None, **kwargs):
+ """The Stage Block operation creates a new block to be committed as part
+ of a blob.
+
+ :param block_id: A valid Base64 string value that identifies the
+ block. Prior to encoding, the string must be less than or equal to 64
+ bytes in size. For a given blob, the length of the value specified for
+ the blockid parameter must be the same size for each block.
+ :type block_id: str
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param body: Initial data
+ :type body: Generator
+ :param transactional_content_md5: Specify the transactional md5 for
+ the body, to be validated by the service.
+ :type transactional_content_md5: bytearray
+ :param transactional_content_crc64: Specify the transactional crc64
+ for the body, to be validated by the service.
+ :type transactional_content_crc64: bytearray
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+
+ comp = "block"
+
+ # Construct URL
+ url = self.stage_block.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/octet-stream'
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if transactional_content_md5 is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
+ if transactional_content_crc64 is not None:
+ header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+
+ # Construct body
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ stage_block.metadata = {'url': '/{containerName}/{blob}'}
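+
+    # Editor's note: illustrative only (assumed public wrapper, placeholder ids).
+    # A staged block is not part of the blob until it is committed by a later
+    # Put Block List call.
+    #
+    #   import base64
+    #   from azure.storage.blob.aio import BlobClient
+    #
+    #   async def stage_block_example(blob: BlobClient):
+    #       # Block ids must be the same length for every block in the blob.
+    #       block_id = base64.b64encode(b"block-000001").decode()
+    #       await blob.stage_block(block_id=block_id, data=b"chunk of data")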
+
+ async def stage_block_from_url(self, block_id, content_length, source_url, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Stage Block operation creates a new block to be committed as part
+ of a blob where the contents are read from a URL.
+
+ :param block_id: A valid Base64 string value that identifies the
+ block. Prior to encoding, the string must be less than or equal to 64
+ bytes in size. For a given blob, the length of the value specified for
+ the blockid parameter must be the same size for each block.
+ :type block_id: str
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param source_url: Specify a URL to the copy source.
+ :type source_url: str
+ :param source_range: Bytes of source data in the specified range.
+ :type source_range: str
+ :param source_content_md5: Specify the md5 calculated for the range of
+ bytes that must be read from the copy source.
+ :type source_content_md5: bytearray
+ :param source_contentcrc64: Specify the crc64 calculated for the range
+ of bytes that must be read from the copy source.
+ :type source_contentcrc64: bytearray
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param source_modified_access_conditions: Additional parameters for
+ the operation
+ :type source_modified_access_conditions:
+ ~azure.storage.blob.models.SourceModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ source_if_modified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ source_if_unmodified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ source_if_match = None
+ if source_modified_access_conditions is not None:
+ source_if_match = source_modified_access_conditions.source_if_match
+ source_if_none_match = None
+ if source_modified_access_conditions is not None:
+ source_if_none_match = source_modified_access_conditions.source_if_none_match
+
+ comp = "block"
+
+ # Construct URL
+ url = self.stage_block_from_url.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str')
+ if source_range is not None:
+ header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str')
+ if source_content_md5 is not None:
+ header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
+ if source_contentcrc64 is not None:
+ header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
+ if source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
+ if source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
+ if source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'}
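+
+    # Editor's note: illustrative only (assumed public wrapper, placeholder URL).
+    # Stage Block From URL copies the block contents server-side from a readable
+    # source URL instead of uploading a request body.
+    #
+    #   async def stage_block_from_url_example(blob, block_id, source_url):
+    #       # Copy the first 4 MiB of the source into an uncommitted block.
+    #       await blob.stage_block_from_url(
+    #           block_id=block_id, source_url=source_url,
+    #           source_offset=0, source_length=4 * 1024 * 1024)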
+
+ async def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Commit Block List operation writes a blob by specifying the list of
+ block IDs that make up the blob. In order to be written as part of a
+ blob, a block must have been successfully written to the server in a
+ prior Put Block operation. You can call Put Block List to update a blob
+ by uploading only those blocks that have changed, then committing the
+ new and existing blocks together. You can do this by specifying whether
+ to commit a block from the committed block list or from the uncommitted
+ block list, or to commit the most recently uploaded version of the
+ block, whichever list it may belong to.
+
+ :param blocks:
+ :type blocks: ~azure.storage.blob.models.BlockLookupList
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param transactional_content_md5: Specify the transactional md5 for
+ the body, to be validated by the service.
+ :type transactional_content_md5: bytearray
+ :param transactional_content_crc64: Specify the transactional crc64
+ for the body, to be validated by the service.
+ :type transactional_content_crc64: bytearray
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param tier: Optional. Indicates the tier to be set on the blob.
+ Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
+ 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
+ :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param blob_tags_string: Optional. Used to set blob tags in various
+ blob operations.
+ :type blob_tags_string: str
+ :param blob_http_headers: Additional parameters for the operation
+ :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ blob_cache_control = None
+ if blob_http_headers is not None:
+ blob_cache_control = blob_http_headers.blob_cache_control
+ blob_content_type = None
+ if blob_http_headers is not None:
+ blob_content_type = blob_http_headers.blob_content_type
+ blob_content_encoding = None
+ if blob_http_headers is not None:
+ blob_content_encoding = blob_http_headers.blob_content_encoding
+ blob_content_language = None
+ if blob_http_headers is not None:
+ blob_content_language = blob_http_headers.blob_content_language
+ blob_content_md5 = None
+ if blob_http_headers is not None:
+ blob_content_md5 = blob_http_headers.blob_content_md5
+ blob_content_disposition = None
+ if blob_http_headers is not None:
+ blob_content_disposition = blob_http_headers.blob_content_disposition
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "blocklist"
+
+ # Construct URL
+ url = self.commit_block_list.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+ if transactional_content_md5 is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
+ if transactional_content_crc64 is not None:
+ header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ if tier is not None:
+ header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if blob_tags_string is not None:
+ header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str')
+ if blob_cache_control is not None:
+ header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
+ if blob_content_type is not None:
+ header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
+ if blob_content_encoding is not None:
+ header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
+ if blob_content_language is not None:
+ header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
+ if blob_content_md5 is not None:
+ header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
+ if blob_content_disposition is not None:
+ header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct body
+ body_content = self._serialize.body(blocks, 'BlockLookupList')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, body_content)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ commit_block_list.metadata = {'url': '/{containerName}/{blob}'}
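+
+    # Editor's note: illustrative only (assumed public wrapper, placeholder ids).
+    # Put Block List turns previously staged blocks into the committed content of
+    # the blob, in the order given.
+    #
+    #   import base64
+    #   from azure.storage.blob import BlobBlock
+    #
+    #   async def commit_example(blob):
+    #       ids = [base64.b64encode(b"block-000001").decode(),
+    #              base64.b64encode(b"block-000002").decode()]
+    #       await blob.commit_block_list([BlobBlock(block_id=i) for i in ids])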
+
+ async def get_block_list(self, list_type="committed", snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Get Block List operation retrieves the list of blocks that have
+ been uploaded as part of a block blob.
+
+ :param list_type: Specifies whether to return the list of committed
+ blocks, the list of uncommitted blocks, or both lists together.
+ Possible values include: 'committed', 'uncommitted', 'all'
+ :type list_type: str or ~azure.storage.blob.models.BlockListType
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: BlockList or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.BlockList
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "blocklist"
+
+ # Construct URL
+ url = self.get_block_list.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'BlockListType')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('BlockList', response)
+ header_dict = {
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_block_list.metadata = {'url': '/{containerName}/{blob}'}
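+
+    # Editor's note: illustrative only (assumed public wrapper). Get Block List
+    # returns the committed and/or uncommitted blocks of a block blob.
+    #
+    #   async def list_blocks_example(blob):
+    #       committed, uncommitted = await blob.get_block_list("all")
+    #       print(len(committed), "committed /", len(uncommitted), "uncommitted")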
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_container_operations_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_container_operations_async.py
new file mode 100644
index 00000000000..b7e1eb840e7
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_container_operations_async.py
@@ -0,0 +1,1400 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class ContainerOperations:
+ """ContainerOperations async operations.
+
+    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer) -> None:
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+
+ async def create(self, timeout=None, metadata=None, access=None, request_id=None, container_cpk_scope_info=None, *, cls=None, **kwargs):
+ """creates a new container under the specified account. If the container
+ with the same name already exists, the operation fails.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param access: Specifies whether data in the container may be accessed
+ publicly and the level of access. Possible values include:
+ 'container', 'blob'
+ :type access: str or ~azure.storage.blob.models.PublicAccessType
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param container_cpk_scope_info: Additional parameters for the
+ operation
+ :type container_cpk_scope_info:
+ ~azure.storage.blob.models.ContainerCpkScopeInfo
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ default_encryption_scope = None
+ if container_cpk_scope_info is not None:
+ default_encryption_scope = container_cpk_scope_info.default_encryption_scope
+ prevent_encryption_scope_override = None
+ if container_cpk_scope_info is not None:
+ prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override
+
+ restype = "container"
+
+ # Construct URL
+ url = self.create.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ if access is not None:
+ header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if default_encryption_scope is not None:
+ header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", default_encryption_scope, 'str')
+ if prevent_encryption_scope_override is not None:
+ header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", prevent_encryption_scope_override, 'bool')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ create.metadata = {'url': '/{containerName}'}
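+
+    # Editor's note: illustrative only (assumed public wrapper, placeholder
+    # names). If a container with the same name already exists, the service
+    # rejects the request and the call fails.
+    #
+    #   from azure.storage.blob.aio import ContainerClient
+    #
+    #   async def create_container_example():
+    #       async with ContainerClient.from_connection_string(
+    #               "<connection-string>", container_name="mycontainer") as container:
+    #           await container.create_container(metadata={"env": "demo"})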
+
+ async def get_properties(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs):
+ """returns all user-defined metadata and system properties for the
+ specified container. The data returned does not include the container's
+ list of blobs.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+
+ restype = "container"
+
+ # Construct URL
+ url = self.get_properties.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
+ 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
+ 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')),
+ 'x-ms-has-immutability-policy': self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')),
+ 'x-ms-has-legal-hold': self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')),
+ 'x-ms-default-encryption-scope': self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')),
+ 'x-ms-deny-encryption-scope-override': self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ get_properties.metadata = {'url': '/{containerName}'}
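+
+    # Editor's note: illustrative only (assumed public wrapper). The returned
+    # properties include metadata, lease state and public-access level, but not
+    # the container's blob listing.
+    #
+    #   async def container_properties_example(container):
+    #       props = await container.get_container_properties()
+    #       print(props.last_modified, props.public_access, props.metadata)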
+
+ async def delete(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """operation marks the specified container for deletion. The container and
+ any blobs contained within it are later deleted during garbage
+ collection.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ restype = "container"
+
+ # Construct URL
+ url = self.delete.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.delete(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ delete.metadata = {'url': '/{containerName}'}
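+
+ # A minimal usage sketch (illustrative, with assumed names): `container_ops` is
+ # an instance of this operations class obtained from the generated client, and
+ # the conditions simply mirror the delete() signature above.
+ #
+ #     lease = models.LeaseAccessConditions(lease_id=current_lease_id)
+ #     conditions = models.ModifiedAccessConditions(if_unmodified_since=last_known_mtime)
+ #     await container_ops.delete(timeout=30, lease_access_conditions=lease,
+ #                                modified_access_conditions=conditions)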
+
+ async def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """operation sets one or more user-defined name-value pairs for the
+ specified container.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+
+ restype = "container"
+ comp = "metadata"
+
+ # Construct URL
+ url = self.set_metadata.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_metadata.metadata = {'url': '/{containerName}'}
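+
+ # A minimal usage sketch (illustrative; `container_ops` and the metadata value
+ # are assumptions): the value is sent through the x-ms-meta header, optionally
+ # guarded by a lease.
+ #
+ #     lease = models.LeaseAccessConditions(lease_id=current_lease_id)
+ #     await container_ops.set_metadata(metadata="category=images",
+ #                                      lease_access_conditions=lease)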
+
+ async def get_access_policy(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs):
+ """gets the permissions for the specified container. The permissions
+ indicate whether container data may be accessed publicly.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: list or the result of cls(response)
+ :rtype: list[~azure.storage.blob.models.SignedIdentifier]
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+
+ restype = "container"
+ comp = "acl"
+
+ # Construct URL
+ url = self.get_access_policy.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('[SignedIdentifier]', response)
+ header_dict = {
+ 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_access_policy.metadata = {'url': '/{containerName}'}
+
+ async def set_access_policy(self, container_acl=None, timeout=None, access=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """sets the permissions for the specified container. The permissions
+ indicate whether blobs in a container may be accessed publicly.
+
+ :param container_acl: The ACLs for the container.
+ :type container_acl: list[~azure.storage.blob.models.SignedIdentifier]
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param access: Specifies whether data in the container may be accessed
+ publicly and the level of access. Possible values include:
+ 'container', 'blob'
+ :type access: str or ~azure.storage.blob.models.PublicAccessType
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ restype = "container"
+ comp = "acl"
+
+ # Construct URL
+ url = self.set_access_policy.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+ if access is not None:
+ header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct body
+ serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifiers', 'wrapped': True}}
+ if container_acl is not None:
+ body_content = self._serialize.body(container_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt)
+ else:
+ body_content = None
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, body_content)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_access_policy.metadata = {'url': '/{containerName}'}
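+
+ # A minimal usage sketch (illustrative; `container_ops` is an assumed instance
+ # of this operations class): grants anonymous read access at the blob level.
+ # A list of models.SignedIdentifier objects could also be passed as
+ # `container_acl` to install stored access policies.
+ #
+ #     await container_ops.set_access_policy(access="blob")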
+
+ async def restore(self, timeout=None, request_id=None, deleted_container_name=None, deleted_container_version=None, *, cls=None, **kwargs):
+ """Restores a previously-deleted container.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param deleted_container_name: Optional. Version 2019-12-12 and
+ later. Specifies the name of the deleted container to restore.
+ :type deleted_container_name: str
+ :param deleted_container_version: Optional. Version 2019-12-12 and
+ later. Specifies the version of the deleted container to restore.
+ :type deleted_container_version: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "container"
+ comp = "undelete"
+
+ # Construct URL
+ url = self.restore.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if deleted_container_name is not None:
+ header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str')
+ if deleted_container_version is not None:
+ header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ restore.metadata = {'url': '/{containerName}'}
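+
+ # A minimal usage sketch (illustrative; `container_ops` is an assumed instance
+ # and the name/version values are placeholders taken from a prior listing of
+ # soft-deleted containers):
+ #
+ #     await container_ops.restore(deleted_container_name="mycontainer",
+ #                                 deleted_container_version="01D6...")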
+
+ async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """[Update] establishes and manages a lock on a container for delete
+ operations. The lock duration can be 15 to 60 seconds, or can be
+ infinite.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param duration: Specifies the duration of the lease, in seconds, or
+ negative one (-1) for a lease that never expires. A non-infinite lease
+ can be between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change.
+ :type duration: int
+ :param proposed_lease_id: Proposed lease ID, in a GUID string format.
+ The Blob service returns 400 (Invalid request) if the proposed lease
+ ID is not in the correct format. See Guid Constructor (String) for a
+ list of valid GUID string formats.
+ :type proposed_lease_id: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ comp = "lease"
+ restype = "container"
+ action = "acquire"
+
+ # Construct URL
+ url = self.acquire_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if duration is not None:
+ header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int')
+ if proposed_lease_id is not None:
+ header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ acquire_lease.metadata = {'url': '/{containerName}'}
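+
+ # A minimal usage sketch (illustrative; `container_ops` is an assumed instance
+ # of this operations class): acquires a 15-second lease and reads the lease id
+ # from the response headers via the `cls` callback.
+ #
+ #     headers = await container_ops.acquire_lease(
+ #         duration=15, cls=lambda resp, body, hdrs: hdrs)
+ #     lease_id = headers['x-ms-lease-id']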
+
+ async def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """[Update] establishes and manages a lock on a container for delete
+ operations. The lock duration can be 15 to 60 seconds, or can be
+ infinite.
+
+ :param lease_id: Specifies the current lease ID on the resource.
+ :type lease_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ comp = "lease"
+ restype = "container"
+ action = "release"
+
+ # Construct URL
+ url = self.release_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ release_lease.metadata = {'url': '/{containerName}'}
+
+ async def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """[Update] establishes and manages a lock on a container for delete
+ operations. The lock duration can be 15 to 60 seconds, or can be
+ infinite.
+
+ :param lease_id: Specifies the current lease ID on the resource.
+ :type lease_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ comp = "lease"
+ restype = "container"
+ action = "renew"
+
+ # Construct URL
+ url = self.renew_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ renew_lease.metadata = {'url': '/{containerName}'}
+
+ async def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """[Update] establishes and manages a lock on a container for delete
+ operations. The lock duration can be 15 to 60 seconds, or can be
+ infinite.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param break_period: For a break operation, proposed duration the
+ lease should continue before it is broken, in seconds, between 0 and
+ 60. This break period is only used if it is shorter than the time
+ remaining on the lease. If longer, the time remaining on the lease is
+ used. A new lease will not be available before the break period has
+ expired, but the lease may be held for longer than the break period.
+ If this header does not appear with a break operation, a
+ fixed-duration lease breaks after the remaining lease period elapses,
+ and an infinite lease breaks immediately.
+ :type break_period: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ comp = "lease"
+ restype = "container"
+ action = "break"
+
+ # Construct URL
+ url = self.break_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if break_period is not None:
+ header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ break_lease.metadata = {'url': '/{containerName}'}
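+
+ # A minimal usage sketch (illustrative; `container_ops` is an assumed instance
+ # of this operations class): breaks the current lease but lets it run for up
+ # to 10 more seconds before a new lease can be acquired.
+ #
+ #     await container_ops.break_lease(break_period=10)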
+
+ async def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """[Update] establishes and manages a lock on a container for delete
+ operations. The lock duration can be 15 to 60 seconds, or can be
+ infinite.
+
+ :param lease_id: Specifies the current lease ID on the resource.
+ :type lease_id: str
+ :param proposed_lease_id: Proposed lease ID, in a GUID string format.
+ The Blob service returns 400 (Invalid request) if the proposed lease
+ ID is not in the correct format. See Guid Constructor (String) for a
+ list of valid GUID string formats.
+ :type proposed_lease_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ comp = "lease"
+ restype = "container"
+ action = "change"
+
+ # Construct URL
+ url = self.change_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ change_lease.metadata = {'url': '/{containerName}'}
+
+ async def list_blob_flat_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs):
+ """[Update] The List Blobs operation returns a list of the blobs under the
+ specified container.
+
+ :param prefix: Filters the results to return only blobs whose
+ name begins with the specified prefix.
+ :type prefix: str
+ :param marker: A string value that identifies the portion of the list
+ of blobs to be returned with the next listing operation. The
+ operation returns the NextMarker value within the response body if the
+ listing operation did not return all blobs remaining to be listed
+ with the current page. The NextMarker value can be used as the value
+ for the marker parameter in a subsequent call to request the next page
+ of list items. The marker value is opaque to the client.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of blobs to
+ return. If the request does not specify maxresults, or specifies a
+ value greater than 5000, the server will return up to 5000 items. Note
+ that if the listing operation crosses a partition boundary, then the
+ service will return a continuation token for retrieving the remainder
+ of the results. For this reason, it is possible that the service will
+ return fewer results than specified by maxresults, or than the default
+ of 5000.
+ :type maxresults: int
+ :param include: Include this parameter to specify one or more datasets
+ to include in the response.
+ :type include: list[str or
+ ~azure.storage.blob.models.ListBlobsIncludeItem]
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: ListBlobsFlatSegmentResponse or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "container"
+ comp = "list"
+
+ # Construct URL
+ url = self.list_blob_flat_segment.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if prefix is not None:
+ query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if maxresults is not None:
+ query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+ if include is not None:
+ query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('ListBlobsFlatSegmentResponse', response)
+ header_dict = {
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ list_blob_flat_segment.metadata = {'url': '/{containerName}'}
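+
+ # A minimal usage sketch (illustrative; `container_ops` is an assumed instance
+ # and the `next_marker` attribute name is inferred from the NextMarker element
+ # described above): pages through blobs 100 at a time.
+ #
+ #     marker = None
+ #     while True:
+ #         segment = await container_ops.list_blob_flat_segment(
+ #             prefix="logs/", marker=marker, maxresults=100)
+ #         ...  # consume segment
+ #         marker = segment.next_marker
+ #         if not marker:
+ #             break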
+
+ async def list_blob_hierarchy_segment(self, delimiter, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs):
+ """[Update] The List Blobs operation returns a list of the blobs under the
+ specified container.
+
+ :param delimiter: When the request includes this parameter, the
+ operation returns a BlobPrefix element in the response body that acts
+ as a placeholder for all blobs whose names begin with the same
+ substring up to the appearance of the delimiter character. The
+ delimiter may be a single character or a string.
+ :type delimiter: str
+ :param prefix: Filters the results to return only blobs whose
+ name begins with the specified prefix.
+ :type prefix: str
+ :param marker: A string value that identifies the portion of the list
+ of blobs to be returned with the next listing operation. The
+ operation returns the NextMarker value within the response body if the
+ listing operation did not return all blobs remaining to be listed
+ with the current page. The NextMarker value can be used as the value
+ for the marker parameter in a subsequent call to request the next page
+ of list items. The marker value is opaque to the client.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of blobs to
+ return. If the request does not specify maxresults, or specifies a
+ value greater than 5000, the server will return up to 5000 items. Note
+ that if the listing operation crosses a partition boundary, then the
+ service will return a continuation token for retrieving the remainder
+ of the results. For this reason, it is possible that the service will
+ return fewer results than specified by maxresults, or than the default
+ of 5000.
+ :type maxresults: int
+ :param include: Include this parameter to specify one or more datasets
+ to include in the response.
+ :type include: list[str or
+ ~azure.storage.blob.models.ListBlobsIncludeItem]
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: ListBlobsHierarchySegmentResponse or the result of
+ cls(response)
+ :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "container"
+ comp = "list"
+
+ # Construct URL
+ url = self.list_blob_hierarchy_segment.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if prefix is not None:
+ query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+ query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str')
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if maxresults is not None:
+ query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+ if include is not None:
+ query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', response)
+ header_dict = {
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'}
+
+ async def get_account_info(self, *, cls=None, **kwargs):
+ """Returns the sku name and account kind .
+
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "account"
+ comp = "properties"
+
+ # Construct URL
+ url = self.get_account_info.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')),
+ 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ get_account_info.metadata = {'url': '/{containerName}'}
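+
+ # A minimal usage sketch (illustrative; `container_ops` is an assumed instance
+ # of this operations class): the operation returns no body, so the SKU name
+ # and account kind are read from the response headers via the `cls` callback.
+ #
+ #     headers = await container_ops.get_account_info(cls=lambda resp, body, hdrs: hdrs)
+ #     sku, kind = headers['x-ms-sku-name'], headers['x-ms-account-kind']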
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_directory_operations_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_directory_operations_async.py
new file mode 100644
index 00000000000..590c0f88433
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_directory_operations_async.py
@@ -0,0 +1,739 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class DirectoryOperations:
+ """DirectoryOperations async operations.
+
+ You should not instantiate this class directly. Instead, you should create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ :ivar resource: Constant value: "directory".
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer) -> None:
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+ self.resource = "directory"
+
+ async def create(self, timeout=None, directory_properties=None, posix_permissions=None, posix_umask=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Create a directory. By default, the destination is overwritten and if
+ the destination already exists and has a lease the lease is broken.
+ This operation supports conditional HTTP requests. For more
+ information, see [Specifying Conditional Headers for Blob Service
+ Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+ To fail if the destination already exists, use a conditional request
+ with If-None-Match: "*".
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param directory_properties: Optional. User-defined properties to be
+ stored with the file or directory, in the format of a comma-separated
+ list of name and value pairs "n1=v1, n2=v2, ...", where each value is
+ base64 encoded.
+ :type directory_properties: str
+ :param posix_permissions: Optional and only valid if Hierarchical
+ Namespace is enabled for the account. Sets POSIX access permissions
+ for the file owner, the file owning group, and others. Each class may
+ be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
+ 0766) are supported.
+ :type posix_permissions: str
+ :param posix_umask: Only valid if Hierarchical Namespace is enabled
+ for the account. This umask restricts permission settings for the file
+ and directory, and will only be applied when a default ACL does not
+ exist in the parent directory. If a umask bit is set, the corresponding
+ permission is disabled; otherwise it is determined by the requested
+ permissions. A 4-digit octal notation (e.g. 0022) is supported here.
+ If no umask is specified, a default umask of 0027 will be used.
+ :type posix_umask: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param directory_http_headers: Additional parameters for the operation
+ :type directory_http_headers:
+ ~azure.storage.blob.models.DirectoryHttpHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`DataLakeStorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ cache_control = None
+ if directory_http_headers is not None:
+ cache_control = directory_http_headers.cache_control
+ content_type = None
+ if directory_http_headers is not None:
+ content_type = directory_http_headers.content_type
+ content_encoding = None
+ if directory_http_headers is not None:
+ content_encoding = directory_http_headers.content_encoding
+ content_language = None
+ if directory_http_headers is not None:
+ content_language = directory_http_headers.content_language
+ content_disposition = None
+ if directory_http_headers is not None:
+ content_disposition = directory_http_headers.content_disposition
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+
+ # Construct URL
+ url = self.create.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if directory_properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str')
+ if posix_permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
+ if posix_umask is not None:
+ header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if cache_control is not None:
+ header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
+ if content_type is not None:
+ header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
+ if content_encoding is not None:
+ header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
+ if content_language is not None:
+ header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
+ if content_disposition is not None:
+ header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ }
+ return cls(response, None, response_headers)
+ create.metadata = {'url': '/{filesystem}/{path}'}
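+
+    # Illustrative usage sketch (not part of the generated code). Assuming the
+    # owning service client attaches this operations group as an attribute named
+    # `directory` (an assumption), a directory could be created and made to fail
+    # if the destination already exists by sending If-None-Match: "*":
+    #
+    #     conditions = models.ModifiedAccessConditions(if_none_match="*")
+    #     await client.directory.create(
+    #         timeout=30,
+    #         posix_permissions="0755",
+    #         modified_access_conditions=conditions)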
+
+ async def rename(self, rename_source, timeout=None, marker=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
+        """Rename a directory. By default, the destination is overwritten and,
+        if the destination already exists and has a lease, the lease is broken.
+ This operation supports conditional HTTP requests. For more
+ information, see [Specifying Conditional Headers for Blob Service
+ Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+ To fail if the destination already exists, use a conditional request
+ with If-None-Match: "*".
+
+ :param rename_source: The file or directory to be renamed. The value
+        must have the following format: "/{filesystem}/{path}". If
+ "x-ms-properties" is specified, the properties will overwrite the
+ existing properties; otherwise, the existing properties will be
+ preserved.
+ :type rename_source: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param marker: When renaming a directory, the number of paths that are
+ renamed with each invocation is limited. If the number of paths to be
+        renamed exceeds this limit, a continuation token is returned in the
+        x-ms-continuation response header. When a continuation token is
+        returned, it must be specified in a subsequent invocation of the
+ rename operation to continue renaming the directory.
+ :type marker: str
+ :param path_rename_mode: Determines the behavior of the rename
+ operation. Possible values include: 'legacy', 'posix'
+ :type path_rename_mode: str or
+ ~azure.storage.blob.models.PathRenameMode
+ :param directory_properties: Optional. User-defined properties to be
+ stored with the file or directory, in the format of a comma-separated
+ list of name and value pairs "n1=v1, n2=v2, ...", where each value is
+ base64 encoded.
+ :type directory_properties: str
+ :param posix_permissions: Optional and only valid if Hierarchical
+ Namespace is enabled for the account. Sets POSIX access permissions
+ for the file owner, the file owning group, and others. Each class may
+ be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
+ 0766) are supported.
+ :type posix_permissions: str
+        :param posix_umask: Only valid if Hierarchical Namespace is enabled
+        for the account. The umask restricts the permission settings for the
+        file or directory and is applied only when a default ACL does not
+        exist in the parent directory. If a bit is set in the umask, the
+        corresponding permission is disabled; otherwise it is determined by
+        the permissions setting. A 4-digit octal notation (e.g. 0022) is
+        supported. If no umask is specified, a default umask of 0027 is used.
+ :type posix_umask: str
+ :param source_lease_id: A lease ID for the source path. If specified,
+ the source path must have an active lease and the lease ID must match.
+ :type source_lease_id: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param directory_http_headers: Additional parameters for the operation
+ :type directory_http_headers:
+ ~azure.storage.blob.models.DirectoryHttpHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param source_modified_access_conditions: Additional parameters for
+ the operation
+ :type source_modified_access_conditions:
+ ~azure.storage.blob.models.SourceModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`DataLakeStorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ cache_control = None
+ if directory_http_headers is not None:
+ cache_control = directory_http_headers.cache_control
+ content_type = None
+ if directory_http_headers is not None:
+ content_type = directory_http_headers.content_type
+ content_encoding = None
+ if directory_http_headers is not None:
+ content_encoding = directory_http_headers.content_encoding
+ content_language = None
+ if directory_http_headers is not None:
+ content_language = directory_http_headers.content_language
+ content_disposition = None
+ if directory_http_headers is not None:
+ content_disposition = directory_http_headers.content_disposition
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ source_if_modified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ source_if_unmodified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ source_if_match = None
+ if source_modified_access_conditions is not None:
+ source_if_match = source_modified_access_conditions.source_if_match
+ source_if_none_match = None
+ if source_modified_access_conditions is not None:
+ source_if_none_match = source_modified_access_conditions.source_if_none_match
+
+ # Construct URL
+ url = self.rename.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if marker is not None:
+ query_parameters['continuation'] = self._serialize.query("marker", marker, 'str')
+ if path_rename_mode is not None:
+ query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str')
+ if directory_properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str')
+ if posix_permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
+ if posix_umask is not None:
+ header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str')
+ if source_lease_id is not None:
+ header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if cache_control is not None:
+ header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
+ if content_type is not None:
+ header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
+ if content_encoding is not None:
+ header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
+ if content_language is not None:
+ header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
+ if content_disposition is not None:
+ header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
+ if source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
+ if source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
+ if source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ }
+ return cls(response, None, response_headers)
+ rename.metadata = {'url': '/{filesystem}/{path}'}
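+
+    # Illustrative usage sketch (not part of the generated code). Renaming a large
+    # directory can require multiple calls; the continuation token comes back in
+    # the x-ms-continuation response header, which can be read through the `cls`
+    # callback (the `client.directory` attribute name below is an assumption):
+    #
+    #     get_headers = lambda response, deserialized, headers: headers
+    #     marker = None
+    #     while True:
+    #         headers = await client.directory.rename(
+    #             "/myfilesystem/source-dir", marker=marker, cls=get_headers)
+    #         marker = headers.get('x-ms-continuation')
+    #         if not marker:
+    #             break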
+
+ async def delete(self, recursive_directory_delete, timeout=None, marker=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Deletes the directory.
+
+ :param recursive_directory_delete: If "true", all paths beneath the
+ directory will be deleted. If "false" and the directory is non-empty,
+ an error occurs.
+ :type recursive_directory_delete: bool
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+        :param marker: When deleting a directory, the number of paths that are
+        deleted with each invocation is limited. If the number of paths to be
+        deleted exceeds this limit, a continuation token is returned in the
+        x-ms-continuation response header. When a continuation token is
+        returned, it must be specified in a subsequent invocation of the
+        delete operation to continue deleting the directory.
+ :type marker: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`DataLakeStorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+
+ # Construct URL
+ url = self.delete.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool')
+ if marker is not None:
+ query_parameters['continuation'] = self._serialize.query("marker", marker, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+
+ # Construct and send request
+ request = self._client.delete(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ }
+ return cls(response, None, response_headers)
+ delete.metadata = {'url': '/{filesystem}/{path}'}
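+
+    # Illustrative usage sketch (not part of the generated code), assuming the
+    # operations group is reachable as `client.directory` (an assumption). A
+    # recursive delete of a large directory follows the same x-ms-continuation
+    # pattern shown for rename above:
+    #
+    #     await client.directory.delete(recursive_directory_delete=True, timeout=30)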
+
+ async def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Set the owner, group, permissions, or access control list for a
+ directory.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param owner: Optional. The owner of the blob or directory.
+ :type owner: str
+ :param group: Optional. The owning group of the blob or directory.
+ :type group: str
+ :param posix_permissions: Optional and only valid if Hierarchical
+ Namespace is enabled for the account. Sets POSIX access permissions
+ for the file owner, the file owning group, and others. Each class may
+ be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
+ 0766) are supported.
+ :type posix_permissions: str
+ :param posix_acl: Sets POSIX access control rights on files and
+ directories. The value is a comma-separated list of access control
+ entries. Each access control entry (ACE) consists of a scope, a type,
+ a user or group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type posix_acl: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`DataLakeStorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ action = "setAccessControl"
+
+ # Construct URL
+ url = self.set_access_control.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if owner is not None:
+ header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
+ if group is not None:
+ header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
+ if posix_permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
+ if posix_acl is not None:
+ header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.patch(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ }
+ return cls(response, None, response_headers)
+ set_access_control.metadata = {'url': '/{filesystem}/{path}'}
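+
+    # Illustrative usage sketch (not part of the generated code), assuming
+    # `client.directory` (an assumption). Owner and ACL can be set in a single
+    # PATCH request; the ACL value is a comma-separated list of ACEs:
+    #
+    #     await client.directory.set_access_control(
+    #         owner="<owner-object-id>",
+    #         posix_acl="user::rwx,group::r-x,other::---")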
+
+ async def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Get the owner, group, permissions, or access control list for a
+ directory.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param upn: Optional. Valid only when Hierarchical Namespace is
+ enabled for the account. If "true", the identity values returned in
+ the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+ transformed from Azure Active Directory Object IDs to User Principal
+ Names. If "false", the values will be returned as Azure Active
+ Directory Object IDs. The default value is false.
+ :type upn: bool
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`DataLakeStorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ action = "getAccessControl"
+
+ # Construct URL
+ url = self.get_access_control.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if upn is not None:
+ query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.head(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')),
+ 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')),
+ 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')),
+ 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ }
+ return cls(response, None, response_headers)
+ get_access_control.metadata = {'url': '/{filesystem}/{path}'}
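+
+    # Illustrative usage sketch (not part of the generated code). The result is
+    # returned only in response headers, so a `cls` callback is needed to read
+    # x-ms-owner, x-ms-group, x-ms-permissions and x-ms-acl (assuming the group
+    # is attached as `client.directory`):
+    #
+    #     headers = await client.directory.get_access_control(
+    #         upn=True, cls=lambda response, deserialized, headers: headers)
+    #     acl = headers.get('x-ms-acl')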
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_page_blob_operations_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_page_blob_operations_async.py
new file mode 100644
index 00000000000..c54a27cf8bf
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_page_blob_operations_async.py
@@ -0,0 +1,1399 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class PageBlobOperations:
+ """PageBlobOperations async operations.
+
+    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "PageBlob".
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer) -> None:
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+ self.x_ms_blob_type = "PageBlob"
+
+ async def create(self, content_length, blob_content_length, timeout=None, tier=None, metadata=None, blob_sequence_number=0, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Create operation creates a new page blob.
+
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param blob_content_length: This header specifies the maximum size for
+ the page blob, up to 1 TB. The page blob size must be aligned to a
+ 512-byte boundary.
+ :type blob_content_length: long
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param tier: Optional. Indicates the tier to be set on the page blob.
+ Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
+ 'P40', 'P50', 'P60', 'P70', 'P80'
+ :type tier: str or
+ ~azure.storage.blob.models.PremiumPageBlobAccessTier
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param blob_sequence_number: Set for page blobs only. The sequence
+ number is a user-controlled value that you can use to track requests.
+ The value of the sequence number must be between 0 and 2^63 - 1.
+ :type blob_sequence_number: long
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param blob_tags_string: Optional. Used to set blob tags in various
+ blob operations.
+ :type blob_tags_string: str
+ :param blob_http_headers: Additional parameters for the operation
+ :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ blob_content_type = None
+ if blob_http_headers is not None:
+ blob_content_type = blob_http_headers.blob_content_type
+ blob_content_encoding = None
+ if blob_http_headers is not None:
+ blob_content_encoding = blob_http_headers.blob_content_encoding
+ blob_content_language = None
+ if blob_http_headers is not None:
+ blob_content_language = blob_http_headers.blob_content_language
+ blob_content_md5 = None
+ if blob_http_headers is not None:
+ blob_content_md5 = blob_http_headers.blob_content_md5
+ blob_cache_control = None
+ if blob_http_headers is not None:
+ blob_cache_control = blob_http_headers.blob_cache_control
+ blob_content_disposition = None
+ if blob_http_headers is not None:
+ blob_content_disposition = blob_http_headers.blob_content_disposition
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ # Construct URL
+ url = self.create.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if tier is not None:
+ header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long')
+ if blob_sequence_number is not None:
+ header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if blob_tags_string is not None:
+ header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str')
+ header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str')
+ if blob_content_type is not None:
+ header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
+ if blob_content_encoding is not None:
+ header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
+ if blob_content_language is not None:
+ header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
+ if blob_content_md5 is not None:
+ header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
+ if blob_cache_control is not None:
+ header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
+ if blob_content_disposition is not None:
+ header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ create.metadata = {'url': '/{containerName}/{blob}'}
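+
+    # Illustrative usage sketch (not part of the generated code). Assuming the
+    # generated blob service client attaches this group as `client.page_blob`
+    # (an assumption), an empty 1 MiB page blob could be created with:
+    #
+    #     await client.page_blob.create(
+    #         content_length=0,
+    #         blob_content_length=1024 * 1024)  # must be a multiple of 512 bytes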
+
+ async def upload_pages(self, body, content_length, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Upload Pages operation writes a range of pages to a page blob.
+
+ :param body: Initial data
+ :type body: Generator
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param transactional_content_md5: Specify the transactional md5 for
+ the body, to be validated by the service.
+ :type transactional_content_md5: bytearray
+ :param transactional_content_crc64: Specify the transactional crc64
+ for the body, to be validated by the service.
+ :type transactional_content_crc64: bytearray
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+        :param range: Specifies the range of bytes to be written as a page,
+        for example "bytes=0-511".
+ :type range: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param sequence_number_access_conditions: Additional parameters for
+ the operation
+ :type sequence_number_access_conditions:
+ ~azure.storage.blob.models.SequenceNumberAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_sequence_number_less_than_or_equal_to = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
+ if_sequence_number_less_than = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
+ if_sequence_number_equal_to = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "page"
+ page_write = "update"
+
+ # Construct URL
+ url = self.upload_pages.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/octet-stream'
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if transactional_content_md5 is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
+ if transactional_content_crc64 is not None:
+ header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
+ if range is not None:
+ header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_sequence_number_less_than_or_equal_to is not None:
+ header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long')
+ if if_sequence_number_less_than is not None:
+ header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long')
+ if if_sequence_number_equal_to is not None:
+ header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct body
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ upload_pages.metadata = {'url': '/{containerName}/{blob}'}
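+
+    # Illustrative usage sketch (not part of the generated code), assuming
+    # `client.page_blob` (an assumption). Each written range must start and end
+    # on 512-byte page boundaries and is expressed as an HTTP byte range:
+    #
+    #     data = b"\x00" * 512
+    #     await client.page_blob.upload_pages(
+    #         body=data, content_length=512, range="bytes=0-511")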
+
+ async def clear_pages(self, content_length, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Clear Pages operation clears a set of pages from a page blob.
+
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+        :param range: Specifies the range of bytes to be cleared, for example
+        "bytes=0-511".
+ :type range: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param sequence_number_access_conditions: Additional parameters for
+ the operation
+ :type sequence_number_access_conditions:
+ ~azure.storage.blob.models.SequenceNumberAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_sequence_number_less_than_or_equal_to = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
+ if_sequence_number_less_than = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
+ if_sequence_number_equal_to = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "page"
+ page_write = "clear"
+
+ # Construct URL
+ url = self.clear_pages.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if range is not None:
+ header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_sequence_number_less_than_or_equal_to is not None:
+ header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long')
+ if if_sequence_number_less_than is not None:
+ header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long')
+ if if_sequence_number_equal_to is not None:
+ header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ clear_pages.metadata = {'url': '/{containerName}/{blob}'}
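+
+ # Illustrative usage sketch (not part of the generated code): a minimal,
+ # hedged example of driving the raw clear_pages operation above. `ops` is a
+ # hypothetical, already-configured instance of this operations class, and the
+ # 512-byte-aligned range is an assumption for the example.
+ #
+ #     await ops.clear_pages(
+ #         content_length=0,        # Clear Pages sends no request body
+ #         range="bytes=0-511",     # pages are cleared in 512-byte units
+ #     )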
+
+ async def upload_pages_from_url(self, source_url, source_range, content_length, range, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, sequence_number_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Upload Pages operation writes a range of pages to a page blob where
+ the contents are read from a URL.
+
+ :param source_url: Specify a URL to the copy source.
+ :type source_url: str
+ :param source_range: Bytes of source data in the specified range. The
+ length of this range should match the ContentLength header and
+ x-ms-range/Range destination range header.
+ :type source_range: str
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param range: The range of bytes to which the source range would be
+ written. The range should be 512-byte aligned and range-end is required.
+ :type range: str
+ :param source_content_md5: Specify the md5 calculated for the range of
+ bytes that must be read from the copy source.
+ :type source_content_md5: bytearray
+ :param source_contentcrc64: Specify the crc64 calculated for the range
+ of bytes that must be read from the copy source.
+ :type source_contentcrc64: bytearray
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param sequence_number_access_conditions: Additional parameters for
+ the operation
+ :type sequence_number_access_conditions:
+ ~azure.storage.blob.models.SequenceNumberAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param source_modified_access_conditions: Additional parameters for
+ the operation
+ :type source_modified_access_conditions:
+ ~azure.storage.blob.models.SourceModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_sequence_number_less_than_or_equal_to = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
+ if_sequence_number_less_than = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
+ if_sequence_number_equal_to = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+ source_if_modified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ source_if_unmodified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ source_if_match = None
+ if source_modified_access_conditions is not None:
+ source_if_match = source_modified_access_conditions.source_if_match
+ source_if_none_match = None
+ if source_modified_access_conditions is not None:
+ source_if_none_match = source_modified_access_conditions.source_if_none_match
+
+ comp = "page"
+ page_write = "update"
+
+ # Construct URL
+ url = self.upload_pages_from_url.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str')
+ header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str')
+ if source_content_md5 is not None:
+ header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
+ if source_contentcrc64 is not None:
+ header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray')
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_sequence_number_less_than_or_equal_to is not None:
+ header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long')
+ if if_sequence_number_less_than is not None:
+ header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long')
+ if if_sequence_number_equal_to is not None:
+ header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+ if source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
+ if source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
+ if source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
+ if source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'}
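+
+ # Illustrative usage sketch (not part of the generated code): how
+ # upload_pages_from_url might be invoked. `ops` and the SAS-authenticated
+ # source URL are hypothetical placeholders.
+ #
+ #     await ops.upload_pages_from_url(
+ #         source_url="https://src.blob.core.windows.net/vhds/src.vhd?<sas>",
+ #         source_range="bytes=0-511",
+ #         content_length=0,        # no body; data is pulled from source_url
+ #         range="bytes=0-511",     # destination range, 512-byte aligned
+ #     )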
+
+ async def get_page_ranges(self, snapshot=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Get Page Ranges operation returns the list of valid page ranges for
+ a page blob or snapshot of a page blob.
+
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param range: Return only the bytes of the blob in the specified
+ range.
+ :type range: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: PageList or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.PageList
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "pagelist"
+
+ # Construct URL
+ url = self.get_page_ranges.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ if range is not None:
+ header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('PageList', response)
+ header_dict = {
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_page_ranges.metadata = {'url': '/{containerName}/{blob}'}
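+
+ # Illustrative usage sketch (not part of the generated code): listing the
+ # valid ranges of a page blob through the raw operation above. `ops` is
+ # hypothetical, and the `page_range`/`start`/`end` attributes are assumed to
+ # match the generated PageList and PageRange models.
+ #
+ #     page_list = await ops.get_page_ranges(range="bytes=0-1048575")
+ #     for rng in page_list.page_range:
+ #         print(rng.start, rng.end)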
+
+ async def get_page_ranges_diff(self, snapshot=None, timeout=None, prevsnapshot=None, prev_snapshot_url=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Get Page Ranges Diff operation returns the list of valid page
+ ranges for a page blob that were changed between target blob and
+ previous snapshot.
+
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param prevsnapshot: Optional in version 2015-07-08 and newer. The
+ prevsnapshot parameter is a DateTime value that specifies that the
+ response will contain only pages that were changed between target blob
+ and previous snapshot. Changed pages include both updated and cleared
+ pages. The target blob may be a snapshot, as long as the snapshot
+ specified by prevsnapshot is the older of the two. Note that
+ incremental snapshots are currently supported only for blobs created
+ on or after January 1, 2016.
+ :type prevsnapshot: str
+ :param prev_snapshot_url: Optional. This header is only supported in
+ service versions 2019-04-19 and after and specifies the URL of a
+ previous snapshot of the target blob. The response will only contain
+ pages that were changed between the target blob and its previous
+ snapshot.
+ :type prev_snapshot_url: str
+ :param range: Return only the bytes of the blob in the specified
+ range.
+ :type range: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: PageList or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.PageList
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "pagelist"
+
+ # Construct URL
+ url = self.get_page_ranges_diff.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if prevsnapshot is not None:
+ query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ if prev_snapshot_url is not None:
+ header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str')
+ if range is not None:
+ header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('PageList', response)
+ header_dict = {
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'}
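+
+ # Illustrative usage sketch (not part of the generated code): a diff listing
+ # against an earlier snapshot. `ops` and the snapshot timestamp are
+ # hypothetical placeholders.
+ #
+ #     diff = await ops.get_page_ranges_diff(
+ #         prevsnapshot="2021-05-20T08:49:37.0875185Z",
+ #     )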
+
+ async def resize(self, blob_content_length, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Resize the Blob.
+
+ :param blob_content_length: This header specifies the maximum size for
+ the page blob, up to 1 TB. The page blob size must be aligned to a
+ 512-byte boundary.
+ :type blob_content_length: long
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "properties"
+
+ # Construct URL
+ url = self.resize.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ resize.metadata = {'url': '/{containerName}/{blob}'}
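+
+ # Illustrative usage sketch (not part of the generated code): resizing a page
+ # blob with the raw operation above. `ops` is hypothetical; the new size must
+ # remain aligned to a 512-byte boundary.
+ #
+ #     await ops.resize(blob_content_length=2 * 1024 * 1024)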
+
+ async def update_sequence_number(self, sequence_number_action, timeout=None, blob_sequence_number=0, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """Update the sequence number of the blob.
+
+ :param sequence_number_action: Required if the
+ x-ms-blob-sequence-number header is set for the request. This property
+ applies to page blobs only. This property indicates how the service
+ should modify the blob's sequence number. Possible values include:
+ 'max', 'update', 'increment'
+ :type sequence_number_action: str or
+ ~azure.storage.blob.models.SequenceNumberActionType
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param blob_sequence_number: Set for page blobs only. The sequence
+ number is a user-controlled value that you can use to track requests.
+ The value of the sequence number must be between 0 and 2^63 - 1.
+ :type blob_sequence_number: long
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "properties"
+
+ # Construct URL
+ url = self.update_sequence_number.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'SequenceNumberActionType')
+ if blob_sequence_number is not None:
+ header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ update_sequence_number.metadata = {'url': '/{containerName}/{blob}'}
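+
+ # Illustrative usage sketch (not part of the generated code): setting an
+ # explicit sequence number. `ops` is hypothetical, and "update" is one of the
+ # documented actions ('max', 'update', 'increment').
+ #
+ #     await ops.update_sequence_number(
+ #         sequence_number_action="update",
+ #         blob_sequence_number=7,
+ #     )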
+
+ async def copy_incremental(self, copy_source, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs):
+ """The Copy Incremental operation copies a snapshot of the source page
+ blob to a destination page blob. The snapshot is copied such that only
+ the differential changes between the previously copied snapshot are
+ transferred to the destination. The copied snapshots are complete
+ copies of the original snapshot and can be read or copied from as
+ usual. This API is supported since REST version 2016-05-31.
+
+ :param copy_source: Specifies the name of the source page blob
+ snapshot. This value is a URL of up to 2 KB in length that specifies a
+ page blob snapshot. The value should be URL-encoded as it would appear
+ in a request URI. The source blob must either be public or must be
+ authenticated via a shared access signature.
+ :type copy_source: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "incrementalcopy"
+
+ # Construct URL
+ url = self.copy_incremental.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+ 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ copy_incremental.metadata = {'url': '/{containerName}/{blob}'}
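+
+ # Illustrative usage sketch (not part of the generated code): starting an
+ # incremental copy from a page blob snapshot. `ops` and the SAS-authenticated
+ # snapshot URL are hypothetical placeholders.
+ #
+ #     await ops.copy_incremental(
+ #         copy_source="https://src.blob.core.windows.net/c/b?snapshot=<ts>&<sas>",
+ #     )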
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_service_operations_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_service_operations_async.py
new file mode 100644
index 00000000000..e12c2b9bfb5
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/aio/operations_async/_service_operations_async.py
@@ -0,0 +1,664 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from ... import models
+
+
+class ServiceOperations:
+ """ServiceOperations async operations.
+
+ You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer) -> None:
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+
+ async def set_properties(self, storage_service_properties, timeout=None, request_id=None, *, cls=None, **kwargs):
+ """Sets properties for a storage account's Blob service endpoint,
+ including properties for Storage Analytics and CORS (Cross-Origin
+ Resource Sharing) rules.
+
+ :param storage_service_properties: The StorageService properties.
+ :type storage_service_properties:
+ ~azure.storage.blob.models.StorageServiceProperties
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "service"
+ comp = "properties"
+
+ # Construct URL
+ url = self.set_properties.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct body
+ body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, body_content)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_properties.metadata = {'url': '/'}
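+
+ # Illustrative usage sketch (not part of the generated code): enabling blob
+ # soft delete through the raw operation above. `service_ops` is hypothetical,
+ # and the StorageServiceProperties/RetentionPolicy names are assumed to follow
+ # the generated models package imported above.
+ #
+ #     props = models.StorageServiceProperties(
+ #         delete_retention_policy=models.RetentionPolicy(enabled=True, days=5))
+ #     await service_ops.set_properties(props)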
+
+ async def get_properties(self, timeout=None, request_id=None, *, cls=None, **kwargs):
+ """gets the properties of a storage account's Blob service, including
+ properties for Storage Analytics and CORS (Cross-Origin Resource
+ Sharing) rules.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: StorageServiceProperties or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.StorageServiceProperties
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "service"
+ comp = "properties"
+
+ # Construct URL
+ url = self.get_properties.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('StorageServiceProperties', response)
+ header_dict = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_properties.metadata = {'url': '/'}
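+
+ # Illustrative usage sketch (not part of the generated code): reading the
+ # service properties back. `service_ops` is hypothetical, and
+ # `delete_retention_policy` is assumed to be a field of the returned
+ # StorageServiceProperties model.
+ #
+ #     props = await service_ops.get_properties()
+ #     print(props.delete_retention_policy.enabled, props.delete_retention_policy.days)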
+
+ async def get_statistics(self, timeout=None, request_id=None, *, cls=None, **kwargs):
+ """Retrieves statistics related to replication for the Blob service. It is
+ only available on the secondary location endpoint when read-access
+ geo-redundant replication is enabled for the storage account.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: StorageServiceStats or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.StorageServiceStats
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "service"
+ comp = "stats"
+
+ # Construct URL
+ url = self.get_statistics.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('StorageServiceStats', response)
+ header_dict = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_statistics.metadata = {'url': '/'}
+
+ async def list_containers_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs):
+ """The List Containers Segment operation returns a list of the containers
+ under the specified account.
+
+ :param prefix: Filters the results to return only containers whose
+ name begins with the specified prefix.
+ :type prefix: str
+ :param marker: A string value that identifies the portion of the list
+ of containers to be returned with the next listing operation. The
+ operation returns the NextMarker value within the response body if the
+ listing operation did not return all containers remaining to be listed
+ with the current page. The NextMarker value can be used as the value
+ for the marker parameter in a subsequent call to request the next page
+ of list items. The marker value is opaque to the client.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of containers to
+ return. If the request does not specify maxresults, or specifies a
+ value greater than 5000, the server will return up to 5000 items. Note
+ that if the listing operation crosses a partition boundary, then the
+ service will return a continuation token for retrieving the remainder
+ of the results. For this reason, it is possible that the service will
+ return fewer results than specified by maxresults, or than the default
+ of 5000.
+ :type maxresults: int
+ :param include: Include this parameter to specify that the container's
+ metadata be returned as part of the response body.
+ :type include: list[str or
+ ~azure.storage.blob.models.ListContainersIncludeType]
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: ListContainersSegmentResponse or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "list"
+
+ # Construct URL
+ url = self.list_containers_segment.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if prefix is not None:
+ query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if maxresults is not None:
+ query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+ if include is not None:
+ query_parameters['include'] = self._serialize.query("include", include, '[ListContainersIncludeType]', div=',')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('ListContainersSegmentResponse', response)
+ header_dict = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ list_containers_segment.metadata = {'url': '/'}
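+
+ # Illustrative usage (not part of the generated operation): a minimal sketch of
+ # paging through containers with list_containers_segment, assuming an instance
+ # of this operations class is available as `service_ops` and that the
+ # deserialized ListContainersSegmentResponse exposes `container_items` and
+ # `next_marker`.
+ #
+ #     marker = None
+ #     while True:
+ #         segment = await service_ops.list_containers_segment(
+ #             prefix="logs", maxresults=100, marker=marker, include=["metadata"])
+ #         for container in segment.container_items:
+ #             print(container.name)
+ #         marker = segment.next_marker
+ #         if not marker:
+ #             break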
+
+ async def get_user_delegation_key(self, key_info, timeout=None, request_id=None, *, cls=None, **kwargs):
+ """Retrieves a user delegation key for the Blob service. This is only a
+ valid operation when using bearer token authentication.
+
+ :param key_info:
+ :type key_info: ~azure.storage.blob.models.KeyInfo
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: UserDelegationKey or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.UserDelegationKey
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "service"
+ comp = "userdelegationkey"
+
+ # Construct URL
+ url = self.get_user_delegation_key.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct body
+ body_content = self._serialize.body(key_info, 'KeyInfo')
+
+ # Construct and send request
+ request = self._client.post(url, query_parameters, header_parameters, body_content)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('UserDelegationKey', response)
+ header_dict = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_user_delegation_key.metadata = {'url': '/'}
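+
+ # Illustrative usage (not part of the generated operation): a hedged sketch of
+ # requesting a user delegation key, assuming `service_ops` is an instance of
+ # this class backed by bearer token (AAD) credentials, using the `models`
+ # package imported at the top of this module and ISO-8601 start/expiry strings
+ # for KeyInfo.
+ #
+ #     key_info = models.KeyInfo(start="2021-05-20T00:00:00Z",
+ #                               expiry="2021-05-21T00:00:00Z")
+ #     delegation_key = await service_ops.get_user_delegation_key(key_info)
+ #     print(delegation_key.value)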
+
+ async def get_account_info(self, *, cls=None, **kwargs):
+ """Returns the sku name and account kind .
+
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "account"
+ comp = "properties"
+
+ # Construct URL
+ url = self.get_account_info.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')),
+ 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ get_account_info.metadata = {'url': '/'}
+
+ async def submit_batch(self, body, content_length, multipart_content_type, timeout=None, request_id=None, *, cls=None, **kwargs):
+ """The Batch operation allows multiple API calls to be embedded into a
+ single HTTP request.
+
+ :param body: Initial data
+ :type body: Generator
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param multipart_content_type: Required. The value of this header must
+ be multipart/mixed with a batch boundary. Example header value:
+ multipart/mixed; boundary=batch_
+ :type multipart_content_type: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: object or the result of cls(response)
+ :rtype: Generator
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "batch"
+
+ # Construct URL
+ url = self.submit_batch.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct body: the request body is streamed as-is via stream_content below
+
+ # Construct and send request
+ request = self._client.post(url, query_parameters, header_parameters, stream_content=body)
+ pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ await response.load_body()
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = response.stream_download(self._client._pipeline)
+ header_dict = {
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ submit_batch.metadata = {'url': '/'}
+
+ async def filter_blobs(self, timeout=None, request_id=None, where=None, marker=None, maxresults=None, *, cls=None, **kwargs):
+ """The Filter Blobs operation enables callers to list blobs across all
+ containers whose tags match a given search expression. Filter blobs
+ searches across all containers within a storage account but can be
+ scoped within the expression to a single container.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param where: Filters the results to return only blobs whose tags match
+ the specified expression.
+ :type where: str
+ :param marker: A string value that identifies the portion of the list
+ of containers to be returned with the next listing operation. The
+ operation returns the NextMarker value within the response body if the
+ listing operation did not return all containers remaining to be listed
+ with the current page. The NextMarker value can be used as the value
+ for the marker parameter in a subsequent call to request the next page
+ of list items. The marker value is opaque to the client.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of containers to
+ return. If the request does not specify maxresults, or specifies a
+ value greater than 5000, the server will return up to 5000 items. Note
+ that if the listing operation crosses a partition boundary, then the
+ service will return a continuation token for retrieving the remainder
+ of the results. For this reason, it is possible that the service will
+ return fewer results than specified by maxresults, or than the default
+ of 5000.
+ :type maxresults: int
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: FilterBlobSegment or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.FilterBlobSegment
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "blobs"
+
+ # Construct URL
+ url = self.filter_blobs.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if where is not None:
+ query_parameters['where'] = self._serialize.query("where", where, 'str')
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if maxresults is not None:
+ query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('FilterBlobSegment', response)
+ header_dict = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ filter_blobs.metadata = {'url': '/'}
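+
+ # Illustrative usage (not part of the generated operation): a hedged sketch of
+ # locating blobs by tag with filter_blobs, assuming `service_ops` is an
+ # instance of this class and that the deserialized FilterBlobSegment exposes
+ # `blobs` (FilterBlobItem entries with `name` and `container_name`).
+ #
+ #     segment = await service_ops.filter_blobs(where="\"project\" = 'storage-preview'")
+ #     for item in segment.blobs:
+ #         print(item.container_name, item.name)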
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/models/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/models/__init__.py
new file mode 100644
index 00000000000..3a6f8ed59f7
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/models/__init__.py
@@ -0,0 +1,229 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+try:
+ from ._models_py3 import AccessPolicy
+ from ._models_py3 import AppendPositionAccessConditions
+ from ._models_py3 import ArrowConfiguration
+ from ._models_py3 import ArrowField
+ from ._models_py3 import BlobFlatListSegment
+ from ._models_py3 import BlobHierarchyListSegment
+ from ._models_py3 import BlobHTTPHeaders
+ from ._models_py3 import BlobItemInternal
+ from ._models_py3 import BlobMetadata
+ from ._models_py3 import BlobPrefix
+ from ._models_py3 import BlobPropertiesInternal
+ from ._models_py3 import BlobTag
+ from ._models_py3 import BlobTags
+ from ._models_py3 import Block
+ from ._models_py3 import BlockList
+ from ._models_py3 import BlockLookupList
+ from ._models_py3 import ClearRange
+ from ._models_py3 import ContainerCpkScopeInfo
+ from ._models_py3 import ContainerItem
+ from ._models_py3 import ContainerProperties
+ from ._models_py3 import CorsRule
+ from ._models_py3 import CpkInfo
+ from ._models_py3 import CpkScopeInfo
+ from ._models_py3 import DataLakeStorageError, DataLakeStorageErrorException
+ from ._models_py3 import DataLakeStorageErrorError
+ from ._models_py3 import DelimitedTextConfiguration
+ from ._models_py3 import DirectoryHttpHeaders
+ from ._models_py3 import FilterBlobItem
+ from ._models_py3 import FilterBlobSegment
+ from ._models_py3 import GeoReplication
+ from ._models_py3 import JsonTextConfiguration
+ from ._models_py3 import KeyInfo
+ from ._models_py3 import LeaseAccessConditions
+ from ._models_py3 import ListBlobsFlatSegmentResponse
+ from ._models_py3 import ListBlobsHierarchySegmentResponse
+ from ._models_py3 import ListContainersSegmentResponse
+ from ._models_py3 import Logging
+ from ._models_py3 import Metrics
+ from ._models_py3 import ModifiedAccessConditions
+ from ._models_py3 import PageList
+ from ._models_py3 import PageRange
+ from ._models_py3 import QueryFormat
+ from ._models_py3 import QueryRequest
+ from ._models_py3 import QuerySerialization
+ from ._models_py3 import RetentionPolicy
+ from ._models_py3 import SequenceNumberAccessConditions
+ from ._models_py3 import SignedIdentifier
+ from ._models_py3 import SourceModifiedAccessConditions
+ from ._models_py3 import StaticWebsite
+ from ._models_py3 import StorageError, StorageErrorException
+ from ._models_py3 import StorageServiceProperties
+ from ._models_py3 import StorageServiceStats
+ from ._models_py3 import UserDelegationKey
+except (SyntaxError, ImportError):
+ from ._models import AccessPolicy
+ from ._models import AppendPositionAccessConditions
+ from ._models import ArrowConfiguration
+ from ._models import ArrowField
+ from ._models import BlobFlatListSegment
+ from ._models import BlobHierarchyListSegment
+ from ._models import BlobHTTPHeaders
+ from ._models import BlobItemInternal
+ from ._models import BlobMetadata
+ from ._models import BlobPrefix
+ from ._models import BlobPropertiesInternal
+ from ._models import BlobTag
+ from ._models import BlobTags
+ from ._models import Block
+ from ._models import BlockList
+ from ._models import BlockLookupList
+ from ._models import ClearRange
+ from ._models import ContainerCpkScopeInfo
+ from ._models import ContainerItem
+ from ._models import ContainerProperties
+ from ._models import CorsRule
+ from ._models import CpkInfo
+ from ._models import CpkScopeInfo
+ from ._models import DataLakeStorageError, DataLakeStorageErrorException
+ from ._models import DataLakeStorageErrorError
+ from ._models import DelimitedTextConfiguration
+ from ._models import DirectoryHttpHeaders
+ from ._models import FilterBlobItem
+ from ._models import FilterBlobSegment
+ from ._models import GeoReplication
+ from ._models import JsonTextConfiguration
+ from ._models import KeyInfo
+ from ._models import LeaseAccessConditions
+ from ._models import ListBlobsFlatSegmentResponse
+ from ._models import ListBlobsHierarchySegmentResponse
+ from ._models import ListContainersSegmentResponse
+ from ._models import Logging
+ from ._models import Metrics
+ from ._models import ModifiedAccessConditions
+ from ._models import PageList
+ from ._models import PageRange
+ from ._models import QueryFormat
+ from ._models import QueryRequest
+ from ._models import QuerySerialization
+ from ._models import RetentionPolicy
+ from ._models import SequenceNumberAccessConditions
+ from ._models import SignedIdentifier
+ from ._models import SourceModifiedAccessConditions
+ from ._models import StaticWebsite
+ from ._models import StorageError, StorageErrorException
+ from ._models import StorageServiceProperties
+ from ._models import StorageServiceStats
+ from ._models import UserDelegationKey
+from ._azure_blob_storage_enums import (
+ AccessTier,
+ AccessTierOptional,
+ AccessTierRequired,
+ AccountKind,
+ ArchiveStatus,
+ BlobExpiryOptions,
+ BlobType,
+ BlockListType,
+ CopyStatusType,
+ DeleteSnapshotsOptionType,
+ EncryptionAlgorithmType,
+ GeoReplicationStatusType,
+ LeaseDurationType,
+ LeaseStateType,
+ LeaseStatusType,
+ ListBlobsIncludeItem,
+ ListContainersIncludeType,
+ PathRenameMode,
+ PremiumPageBlobAccessTier,
+ PublicAccessType,
+ QueryFormatType,
+ RehydratePriority,
+ SequenceNumberActionType,
+ SkuName,
+ StorageErrorCode,
+ SyncCopyStatusType,
+)
+
+__all__ = [
+ 'AccessPolicy',
+ 'AppendPositionAccessConditions',
+ 'ArrowConfiguration',
+ 'ArrowField',
+ 'BlobFlatListSegment',
+ 'BlobHierarchyListSegment',
+ 'BlobHTTPHeaders',
+ 'BlobItemInternal',
+ 'BlobMetadata',
+ 'BlobPrefix',
+ 'BlobPropertiesInternal',
+ 'BlobTag',
+ 'BlobTags',
+ 'Block',
+ 'BlockList',
+ 'BlockLookupList',
+ 'ClearRange',
+ 'ContainerCpkScopeInfo',
+ 'ContainerItem',
+ 'ContainerProperties',
+ 'CorsRule',
+ 'CpkInfo',
+ 'CpkScopeInfo',
+ 'DataLakeStorageError', 'DataLakeStorageErrorException',
+ 'DataLakeStorageErrorError',
+ 'DelimitedTextConfiguration',
+ 'DirectoryHttpHeaders',
+ 'FilterBlobItem',
+ 'FilterBlobSegment',
+ 'GeoReplication',
+ 'JsonTextConfiguration',
+ 'KeyInfo',
+ 'LeaseAccessConditions',
+ 'ListBlobsFlatSegmentResponse',
+ 'ListBlobsHierarchySegmentResponse',
+ 'ListContainersSegmentResponse',
+ 'Logging',
+ 'Metrics',
+ 'ModifiedAccessConditions',
+ 'PageList',
+ 'PageRange',
+ 'QueryFormat',
+ 'QueryRequest',
+ 'QuerySerialization',
+ 'RetentionPolicy',
+ 'SequenceNumberAccessConditions',
+ 'SignedIdentifier',
+ 'SourceModifiedAccessConditions',
+ 'StaticWebsite',
+ 'StorageError', 'StorageErrorException',
+ 'StorageServiceProperties',
+ 'StorageServiceStats',
+ 'UserDelegationKey',
+ 'PublicAccessType',
+ 'CopyStatusType',
+ 'LeaseDurationType',
+ 'LeaseStateType',
+ 'LeaseStatusType',
+ 'AccessTier',
+ 'ArchiveStatus',
+ 'BlobType',
+ 'RehydratePriority',
+ 'StorageErrorCode',
+ 'GeoReplicationStatusType',
+ 'QueryFormatType',
+ 'AccessTierRequired',
+ 'AccessTierOptional',
+ 'PremiumPageBlobAccessTier',
+ 'BlobExpiryOptions',
+ 'BlockListType',
+ 'DeleteSnapshotsOptionType',
+ 'EncryptionAlgorithmType',
+ 'ListBlobsIncludeItem',
+ 'ListContainersIncludeType',
+ 'PathRenameMode',
+ 'SequenceNumberActionType',
+ 'SkuName',
+ 'AccountKind',
+ 'SyncCopyStatusType',
+]
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/models/_azure_blob_storage_enums.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/models/_azure_blob_storage_enums.py
new file mode 100644
index 00000000000..e45eea3f2f3
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/models/_azure_blob_storage_enums.py
@@ -0,0 +1,343 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from enum import Enum
+
+
+class PublicAccessType(str, Enum):
+
+ container = "container"
+ blob = "blob"
+
+
+class CopyStatusType(str, Enum):
+
+ pending = "pending"
+ success = "success"
+ aborted = "aborted"
+ failed = "failed"
+
+
+class LeaseDurationType(str, Enum):
+
+ infinite = "infinite"
+ fixed = "fixed"
+
+
+class LeaseStateType(str, Enum):
+
+ available = "available"
+ leased = "leased"
+ expired = "expired"
+ breaking = "breaking"
+ broken = "broken"
+
+
+class LeaseStatusType(str, Enum):
+
+ locked = "locked"
+ unlocked = "unlocked"
+
+
+class AccessTier(str, Enum):
+
+ p4 = "P4"
+ p6 = "P6"
+ p10 = "P10"
+ p15 = "P15"
+ p20 = "P20"
+ p30 = "P30"
+ p40 = "P40"
+ p50 = "P50"
+ p60 = "P60"
+ p70 = "P70"
+ p80 = "P80"
+ hot = "Hot"
+ cool = "Cool"
+ archive = "Archive"
+
+
+class ArchiveStatus(str, Enum):
+
+ rehydrate_pending_to_hot = "rehydrate-pending-to-hot"
+ rehydrate_pending_to_cool = "rehydrate-pending-to-cool"
+
+
+class BlobType(str, Enum):
+
+ block_blob = "BlockBlob"
+ page_blob = "PageBlob"
+ append_blob = "AppendBlob"
+
+
+class RehydratePriority(str, Enum):
+
+ high = "High"
+ standard = "Standard"
+
+
+class StorageErrorCode(str, Enum):
+
+ account_already_exists = "AccountAlreadyExists"
+ account_being_created = "AccountBeingCreated"
+ account_is_disabled = "AccountIsDisabled"
+ authentication_failed = "AuthenticationFailed"
+ authorization_failure = "AuthorizationFailure"
+ condition_headers_not_supported = "ConditionHeadersNotSupported"
+ condition_not_met = "ConditionNotMet"
+ empty_metadata_key = "EmptyMetadataKey"
+ insufficient_account_permissions = "InsufficientAccountPermissions"
+ internal_error = "InternalError"
+ invalid_authentication_info = "InvalidAuthenticationInfo"
+ invalid_header_value = "InvalidHeaderValue"
+ invalid_http_verb = "InvalidHttpVerb"
+ invalid_input = "InvalidInput"
+ invalid_md5 = "InvalidMd5"
+ invalid_metadata = "InvalidMetadata"
+ invalid_query_parameter_value = "InvalidQueryParameterValue"
+ invalid_range = "InvalidRange"
+ invalid_resource_name = "InvalidResourceName"
+ invalid_uri = "InvalidUri"
+ invalid_xml_document = "InvalidXmlDocument"
+ invalid_xml_node_value = "InvalidXmlNodeValue"
+ md5_mismatch = "Md5Mismatch"
+ metadata_too_large = "MetadataTooLarge"
+ missing_content_length_header = "MissingContentLengthHeader"
+ missing_required_query_parameter = "MissingRequiredQueryParameter"
+ missing_required_header = "MissingRequiredHeader"
+ missing_required_xml_node = "MissingRequiredXmlNode"
+ multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported"
+ operation_timed_out = "OperationTimedOut"
+ out_of_range_input = "OutOfRangeInput"
+ out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue"
+ request_body_too_large = "RequestBodyTooLarge"
+ resource_type_mismatch = "ResourceTypeMismatch"
+ request_url_failed_to_parse = "RequestUrlFailedToParse"
+ resource_already_exists = "ResourceAlreadyExists"
+ resource_not_found = "ResourceNotFound"
+ server_busy = "ServerBusy"
+ unsupported_header = "UnsupportedHeader"
+ unsupported_xml_node = "UnsupportedXmlNode"
+ unsupported_query_parameter = "UnsupportedQueryParameter"
+ unsupported_http_verb = "UnsupportedHttpVerb"
+ append_position_condition_not_met = "AppendPositionConditionNotMet"
+ blob_already_exists = "BlobAlreadyExists"
+ blob_not_found = "BlobNotFound"
+ blob_overwritten = "BlobOverwritten"
+ blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength"
+ block_count_exceeds_limit = "BlockCountExceedsLimit"
+ block_list_too_long = "BlockListTooLong"
+ cannot_change_to_lower_tier = "CannotChangeToLowerTier"
+ cannot_verify_copy_source = "CannotVerifyCopySource"
+ container_already_exists = "ContainerAlreadyExists"
+ container_being_deleted = "ContainerBeingDeleted"
+ container_disabled = "ContainerDisabled"
+ container_not_found = "ContainerNotFound"
+ content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit"
+ copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported"
+ copy_id_mismatch = "CopyIdMismatch"
+ feature_version_mismatch = "FeatureVersionMismatch"
+ incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch"
+ incremental_copy_of_eralier_version_snapshot_not_allowed = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
+ incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot"
+ infinite_lease_duration_required = "InfiniteLeaseDurationRequired"
+ invalid_blob_or_block = "InvalidBlobOrBlock"
+ invalid_blob_tier = "InvalidBlobTier"
+ invalid_blob_type = "InvalidBlobType"
+ invalid_block_id = "InvalidBlockId"
+ invalid_block_list = "InvalidBlockList"
+ invalid_operation = "InvalidOperation"
+ invalid_page_range = "InvalidPageRange"
+ invalid_source_blob_type = "InvalidSourceBlobType"
+ invalid_source_blob_url = "InvalidSourceBlobUrl"
+ invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation"
+ lease_already_present = "LeaseAlreadyPresent"
+ lease_already_broken = "LeaseAlreadyBroken"
+ lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation"
+ lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation"
+ lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation"
+ lease_id_missing = "LeaseIdMissing"
+ lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired"
+ lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged"
+ lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed"
+ lease_lost = "LeaseLost"
+ lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation"
+ lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation"
+ lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation"
+ max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet"
+ no_authentication_information = "NoAuthenticationInformation"
+ no_pending_copy_operation = "NoPendingCopyOperation"
+ operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob"
+ pending_copy_operation = "PendingCopyOperation"
+ previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer"
+ previous_snapshot_not_found = "PreviousSnapshotNotFound"
+ previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported"
+ sequence_number_condition_not_met = "SequenceNumberConditionNotMet"
+ sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge"
+ snapshot_count_exceeded = "SnapshotCountExceeded"
+ snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded"
+ snapshots_present = "SnapshotsPresent"
+ source_condition_not_met = "SourceConditionNotMet"
+ system_in_use = "SystemInUse"
+ target_condition_not_met = "TargetConditionNotMet"
+ unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite"
+ blob_being_rehydrated = "BlobBeingRehydrated"
+ blob_archived = "BlobArchived"
+ blob_not_archived = "BlobNotArchived"
+ authorization_source_ip_mismatch = "AuthorizationSourceIPMismatch"
+ authorization_protocol_mismatch = "AuthorizationProtocolMismatch"
+ authorization_permission_mismatch = "AuthorizationPermissionMismatch"
+ authorization_service_mismatch = "AuthorizationServiceMismatch"
+ authorization_resource_type_mismatch = "AuthorizationResourceTypeMismatch"
+
+
+class GeoReplicationStatusType(str, Enum):
+
+ live = "live"
+ bootstrap = "bootstrap"
+ unavailable = "unavailable"
+
+
+class QueryFormatType(str, Enum):
+
+ delimited = "delimited"
+ json = "json"
+ arrow = "arrow"
+
+
+class AccessTierRequired(str, Enum):
+
+ p4 = "P4"
+ p6 = "P6"
+ p10 = "P10"
+ p15 = "P15"
+ p20 = "P20"
+ p30 = "P30"
+ p40 = "P40"
+ p50 = "P50"
+ p60 = "P60"
+ p70 = "P70"
+ p80 = "P80"
+ hot = "Hot"
+ cool = "Cool"
+ archive = "Archive"
+
+
+class AccessTierOptional(str, Enum):
+
+ p4 = "P4"
+ p6 = "P6"
+ p10 = "P10"
+ p15 = "P15"
+ p20 = "P20"
+ p30 = "P30"
+ p40 = "P40"
+ p50 = "P50"
+ p60 = "P60"
+ p70 = "P70"
+ p80 = "P80"
+ hot = "Hot"
+ cool = "Cool"
+ archive = "Archive"
+
+
+class PremiumPageBlobAccessTier(str, Enum):
+
+ p4 = "P4"
+ p6 = "P6"
+ p10 = "P10"
+ p15 = "P15"
+ p20 = "P20"
+ p30 = "P30"
+ p40 = "P40"
+ p50 = "P50"
+ p60 = "P60"
+ p70 = "P70"
+ p80 = "P80"
+
+
+class BlobExpiryOptions(str, Enum):
+
+ never_expire = "NeverExpire"
+ relative_to_creation = "RelativeToCreation"
+ relative_to_now = "RelativeToNow"
+ absolute = "Absolute"
+
+
+class BlockListType(str, Enum):
+
+ committed = "committed"
+ uncommitted = "uncommitted"
+ all = "all"
+
+
+class DeleteSnapshotsOptionType(str, Enum):
+
+ include = "include"
+ only = "only"
+
+
+class EncryptionAlgorithmType(str, Enum):
+
+ aes256 = "AES256"
+
+
+class ListBlobsIncludeItem(str, Enum):
+
+ copy = "copy"
+ deleted = "deleted"
+ metadata = "metadata"
+ snapshots = "snapshots"
+ uncommittedblobs = "uncommittedblobs"
+ versions = "versions"
+ tags = "tags"
+
+
+class ListContainersIncludeType(str, Enum):
+
+ metadata = "metadata"
+ deleted = "deleted"
+
+
+class PathRenameMode(str, Enum):
+
+ legacy = "legacy"
+ posix = "posix"
+
+
+class SequenceNumberActionType(str, Enum):
+
+ max = "max"
+ update = "update"
+ increment = "increment"
+
+
+class SkuName(str, Enum):
+
+ standard_lrs = "Standard_LRS"
+ standard_grs = "Standard_GRS"
+ standard_ragrs = "Standard_RAGRS"
+ standard_zrs = "Standard_ZRS"
+ premium_lrs = "Premium_LRS"
+
+
+class AccountKind(str, Enum):
+
+ storage = "Storage"
+ blob_storage = "BlobStorage"
+ storage_v2 = "StorageV2"
+ file_storage = "FileStorage"
+ block_blob_storage = "BlockBlobStorage"
+
+
+class SyncCopyStatusType(str, Enum):
+
+ success = "success"
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/models/_models.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/models/_models.py
new file mode 100644
index 00000000000..1fdddbe96a0
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/models/_models.py
@@ -0,0 +1,2009 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+from azure.core.exceptions import HttpResponseError
+
+
+class AccessPolicy(Model):
+ """An Access policy.
+
+ :param start: the date-time the policy is active
+ :type start: str
+ :param expiry: the date-time the policy expires
+ :type expiry: str
+ :param permission: the permissions for the acl policy
+ :type permission: str
+ """
+
+ _attribute_map = {
+ 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
+ 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
+ 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(AccessPolicy, self).__init__(**kwargs)
+ self.start = kwargs.get('start', None)
+ self.expiry = kwargs.get('expiry', None)
+ self.permission = kwargs.get('permission', None)
+
+
+class AppendPositionAccessConditions(Model):
+ """Additional parameters for a set of operations, such as:
+ AppendBlob_append_block, AppendBlob_append_block_from_url, AppendBlob_seal.
+
+ :param max_size: Optional conditional header. The max length in bytes
+ permitted for the append blob. If the Append Block operation would cause
+ the blob to exceed that limit or if the blob size is already greater than
+ the value specified in this header, the request will fail with
+ MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition
+ Failed).
+ :type max_size: long
+ :param append_position: Optional conditional header, used only for the
+ Append Block operation. A number indicating the byte offset to compare.
+ Append Block will succeed only if the append position is equal to this
+ number. If it is not, the request will fail with the
+ AppendPositionConditionNotMet error (HTTP status code 412 - Precondition
+ Failed).
+ :type append_position: long
+ """
+
+ _attribute_map = {
+ 'max_size': {'key': '', 'type': 'long', 'xml': {'name': 'max_size'}},
+ 'append_position': {'key': '', 'type': 'long', 'xml': {'name': 'append_position'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(AppendPositionAccessConditions, self).__init__(**kwargs)
+ self.max_size = kwargs.get('max_size', None)
+ self.append_position = kwargs.get('append_position', None)
+
+
+class ArrowConfiguration(Model):
+ """arrow configuration.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param schema: Required.
+ :type schema: list[~azure.storage.blob.models.ArrowField]
+ """
+
+ _validation = {
+ 'schema': {'required': True},
+ }
+
+ _attribute_map = {
+ 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'itemsName': 'Schema', 'wrapped': True}},
+ }
+ _xml_map = {
+ 'name': 'ArrowConfiguration'
+ }
+
+ def __init__(self, **kwargs):
+ super(ArrowConfiguration, self).__init__(**kwargs)
+ self.schema = kwargs.get('schema', None)
+
+
+class ArrowField(Model):
+ """field of an arrow schema.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param type: Required.
+ :type type: str
+ :param name:
+ :type name: str
+ :param precision:
+ :type precision: int
+ :param scale:
+ :type scale: int
+ """
+
+ _validation = {
+ 'type': {'required': True},
+ }
+
+ _attribute_map = {
+ 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}},
+ 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+ 'precision': {'key': 'Precision', 'type': 'int', 'xml': {'name': 'Precision'}},
+ 'scale': {'key': 'Scale', 'type': 'int', 'xml': {'name': 'Scale'}},
+ }
+ _xml_map = {
+ 'name': 'Field'
+ }
+
+ def __init__(self, **kwargs):
+ super(ArrowField, self).__init__(**kwargs)
+ self.type = kwargs.get('type', None)
+ self.name = kwargs.get('name', None)
+ self.precision = kwargs.get('precision', None)
+ self.scale = kwargs.get('scale', None)
+
+
+class BlobFlatListSegment(Model):
+ """BlobFlatListSegment.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param blob_items: Required.
+ :type blob_items: list[~azure.storage.blob.models.BlobItemInternal]
+ """
+
+ _validation = {
+ 'blob_items': {'required': True},
+ }
+
+ _attribute_map = {
+ 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}},
+ }
+ _xml_map = {
+ 'name': 'Blobs'
+ }
+
+ def __init__(self, **kwargs):
+ super(BlobFlatListSegment, self).__init__(**kwargs)
+ self.blob_items = kwargs.get('blob_items', None)
+
+
+class BlobHierarchyListSegment(Model):
+ """BlobHierarchyListSegment.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param blob_prefixes:
+ :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix]
+ :param blob_items: Required.
+ :type blob_items: list[~azure.storage.blob.models.BlobItemInternal]
+ """
+
+ _validation = {
+ 'blob_items': {'required': True},
+ }
+
+ _attribute_map = {
+ 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix', 'itemsName': 'BlobPrefix'}},
+ 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}},
+ }
+ _xml_map = {
+ 'name': 'Blobs'
+ }
+
+ def __init__(self, **kwargs):
+ super(BlobHierarchyListSegment, self).__init__(**kwargs)
+ self.blob_prefixes = kwargs.get('blob_prefixes', None)
+ self.blob_items = kwargs.get('blob_items', None)
+
+
+class BlobHTTPHeaders(Model):
+ """Additional parameters for a set of operations.
+
+ :param blob_cache_control: Optional. Sets the blob's cache control. If
+ specified, this property is stored with the blob and returned with a read
+ request.
+ :type blob_cache_control: str
+ :param blob_content_type: Optional. Sets the blob's content type. If
+ specified, this property is stored with the blob and returned with a read
+ request.
+ :type blob_content_type: str
+ :param blob_content_md5: Optional. An MD5 hash of the blob content. Note
+ that this hash is not validated, as the hashes for the individual blocks
+ were validated when each was uploaded.
+ :type blob_content_md5: bytearray
+ :param blob_content_encoding: Optional. Sets the blob's content encoding.
+ If specified, this property is stored with the blob and returned with a
+ read request.
+ :type blob_content_encoding: str
+ :param blob_content_language: Optional. Sets the blob's content language.
+ If specified, this property is stored with the blob and returned with a
+ read request.
+ :type blob_content_language: str
+ :param blob_content_disposition: Optional. Sets the blob's
+ Content-Disposition header.
+ :type blob_content_disposition: str
+ """
+
+ _attribute_map = {
+ 'blob_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'blob_cache_control'}},
+ 'blob_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_type'}},
+ 'blob_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'blob_content_md5'}},
+ 'blob_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_encoding'}},
+ 'blob_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_language'}},
+ 'blob_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_disposition'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(BlobHTTPHeaders, self).__init__(**kwargs)
+ self.blob_cache_control = kwargs.get('blob_cache_control', None)
+ self.blob_content_type = kwargs.get('blob_content_type', None)
+ self.blob_content_md5 = kwargs.get('blob_content_md5', None)
+ self.blob_content_encoding = kwargs.get('blob_content_encoding', None)
+ self.blob_content_language = kwargs.get('blob_content_language', None)
+ self.blob_content_disposition = kwargs.get('blob_content_disposition', None)
+
+
+class BlobItemInternal(Model):
+ """An Azure Storage blob.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required.
+ :type name: str
+ :param deleted: Required.
+ :type deleted: bool
+ :param snapshot: Required.
+ :type snapshot: str
+ :param version_id:
+ :type version_id: str
+ :param is_current_version:
+ :type is_current_version: bool
+ :param properties: Required.
+ :type properties: ~azure.storage.blob.models.BlobPropertiesInternal
+ :param metadata:
+ :type metadata: ~azure.storage.blob.models.BlobMetadata
+ :param blob_tags:
+ :type blob_tags: ~azure.storage.blob.models.BlobTags
+ :param object_replication_metadata:
+ :type object_replication_metadata: dict[str, str]
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ 'deleted': {'required': True},
+ 'snapshot': {'required': True},
+ 'properties': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+ 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}},
+ 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}},
+ 'version_id': {'key': 'VersionId', 'type': 'str', 'xml': {'name': 'VersionId'}},
+ 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool', 'xml': {'name': 'IsCurrentVersion'}},
+ 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal', 'xml': {'name': 'Properties'}},
+ 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}},
+ 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags', 'xml': {'name': 'BlobTags'}},
+ 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}', 'xml': {'name': 'OrMetadata'}},
+ }
+ _xml_map = {
+ 'name': 'Blob'
+ }
+
+ def __init__(self, **kwargs):
+ super(BlobItemInternal, self).__init__(**kwargs)
+ self.name = kwargs.get('name', None)
+ self.deleted = kwargs.get('deleted', None)
+ self.snapshot = kwargs.get('snapshot', None)
+ self.version_id = kwargs.get('version_id', None)
+ self.is_current_version = kwargs.get('is_current_version', None)
+ self.properties = kwargs.get('properties', None)
+ self.metadata = kwargs.get('metadata', None)
+ self.blob_tags = kwargs.get('blob_tags', None)
+ self.object_replication_metadata = kwargs.get('object_replication_metadata', None)
+
+
+class BlobMetadata(Model):
+ """BlobMetadata.
+
+ :param additional_properties: Unmatched properties from the message are
+ deserialized this collection
+ :type additional_properties: dict[str, str]
+ :param encrypted:
+ :type encrypted: str
+ """
+
+ _attribute_map = {
+ 'additional_properties': {'key': '', 'type': '{str}', 'xml': {'name': 'additional_properties'}},
+ 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'name': 'Encrypted', 'attr': True}},
+ }
+ _xml_map = {
+ 'name': 'Metadata'
+ }
+
+ def __init__(self, **kwargs):
+ super(BlobMetadata, self).__init__(**kwargs)
+ self.additional_properties = kwargs.get('additional_properties', None)
+ self.encrypted = kwargs.get('encrypted', None)
+
+
+class BlobPrefix(Model):
+ """BlobPrefix.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required.
+ :type name: str
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(BlobPrefix, self).__init__(**kwargs)
+ self.name = kwargs.get('name', None)
+
+
+class BlobPropertiesInternal(Model):
+ """Properties of a blob.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param creation_time:
+ :type creation_time: datetime
+ :param last_modified: Required.
+ :type last_modified: datetime
+ :param etag: Required.
+ :type etag: str
+ :param content_length: Size in bytes
+ :type content_length: long
+ :param content_type:
+ :type content_type: str
+ :param content_encoding:
+ :type content_encoding: str
+ :param content_language:
+ :type content_language: str
+ :param content_md5:
+ :type content_md5: bytearray
+ :param content_disposition:
+ :type content_disposition: str
+ :param cache_control:
+ :type cache_control: str
+ :param blob_sequence_number:
+ :type blob_sequence_number: long
+ :param blob_type: Possible values include: 'BlockBlob', 'PageBlob',
+ 'AppendBlob'
+ :type blob_type: str or ~azure.storage.blob.models.BlobType
+ :param lease_status: Possible values include: 'locked', 'unlocked'
+ :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType
+ :param lease_state: Possible values include: 'available', 'leased',
+ 'expired', 'breaking', 'broken'
+ :type lease_state: str or ~azure.storage.blob.models.LeaseStateType
+ :param lease_duration: Possible values include: 'infinite', 'fixed'
+ :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
+ :param copy_id:
+ :type copy_id: str
+ :param copy_status: Possible values include: 'pending', 'success',
+ 'aborted', 'failed'
+ :type copy_status: str or ~azure.storage.blob.models.CopyStatusType
+ :param copy_source:
+ :type copy_source: str
+ :param copy_progress:
+ :type copy_progress: str
+ :param copy_completion_time:
+ :type copy_completion_time: datetime
+ :param copy_status_description:
+ :type copy_status_description: str
+ :param server_encrypted:
+ :type server_encrypted: bool
+ :param incremental_copy:
+ :type incremental_copy: bool
+ :param destination_snapshot:
+ :type destination_snapshot: str
+ :param deleted_time:
+ :type deleted_time: datetime
+ :param remaining_retention_days:
+ :type remaining_retention_days: int
+ :param access_tier: Possible values include: 'P4', 'P6', 'P10', 'P15',
+ 'P20', 'P30', 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
+ :type access_tier: str or ~azure.storage.blob.models.AccessTier
+ :param access_tier_inferred:
+ :type access_tier_inferred: bool
+ :param archive_status: Possible values include:
+ 'rehydrate-pending-to-hot', 'rehydrate-pending-to-cool'
+ :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus
+ :param customer_provided_key_sha256:
+ :type customer_provided_key_sha256: str
+ :param encryption_scope: The name of the encryption scope under which the
+ blob is encrypted.
+ :type encryption_scope: str
+ :param access_tier_change_time:
+ :type access_tier_change_time: datetime
+ :param tag_count:
+ :type tag_count: int
+ :param expires_on:
+ :type expires_on: datetime
+ :param is_sealed:
+ :type is_sealed: bool
+ :param rehydrate_priority: Possible values include: 'High', 'Standard'
+ :type rehydrate_priority: str or
+ ~azure.storage.blob.models.RehydratePriority
+ :param last_accessed_on:
+ :type last_accessed_on: datetime
+ """
+
+ _validation = {
+ 'last_modified': {'required': True},
+ 'etag': {'required': True},
+ }
+
+ _attribute_map = {
+ 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123', 'xml': {'name': 'Creation-Time'}},
+ 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}},
+ 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}},
+ 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}},
+ 'content_type': {'key': 'Content-Type', 'type': 'str', 'xml': {'name': 'Content-Type'}},
+ 'content_encoding': {'key': 'Content-Encoding', 'type': 'str', 'xml': {'name': 'Content-Encoding'}},
+ 'content_language': {'key': 'Content-Language', 'type': 'str', 'xml': {'name': 'Content-Language'}},
+ 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray', 'xml': {'name': 'Content-MD5'}},
+ 'content_disposition': {'key': 'Content-Disposition', 'type': 'str', 'xml': {'name': 'Content-Disposition'}},
+ 'cache_control': {'key': 'Cache-Control', 'type': 'str', 'xml': {'name': 'Cache-Control'}},
+ 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long', 'xml': {'name': 'x-ms-blob-sequence-number'}},
+ 'blob_type': {'key': 'BlobType', 'type': 'BlobType', 'xml': {'name': 'BlobType'}},
+ 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}},
+ 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}},
+ 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}},
+ 'copy_id': {'key': 'CopyId', 'type': 'str', 'xml': {'name': 'CopyId'}},
+ 'copy_status': {'key': 'CopyStatus', 'type': 'CopyStatusType', 'xml': {'name': 'CopyStatus'}},
+ 'copy_source': {'key': 'CopySource', 'type': 'str', 'xml': {'name': 'CopySource'}},
+ 'copy_progress': {'key': 'CopyProgress', 'type': 'str', 'xml': {'name': 'CopyProgress'}},
+ 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123', 'xml': {'name': 'CopyCompletionTime'}},
+ 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str', 'xml': {'name': 'CopyStatusDescription'}},
+ 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool', 'xml': {'name': 'ServerEncrypted'}},
+ 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool', 'xml': {'name': 'IncrementalCopy'}},
+ 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str', 'xml': {'name': 'DestinationSnapshot'}},
+ 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}},
+ 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}},
+ 'access_tier': {'key': 'AccessTier', 'type': 'str', 'xml': {'name': 'AccessTier'}},
+ 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool', 'xml': {'name': 'AccessTierInferred'}},
+ 'archive_status': {'key': 'ArchiveStatus', 'type': 'str', 'xml': {'name': 'ArchiveStatus'}},
+ 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str', 'xml': {'name': 'CustomerProvidedKeySha256'}},
+ 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str', 'xml': {'name': 'EncryptionScope'}},
+ 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 'xml': {'name': 'AccessTierChangeTime'}},
+ 'tag_count': {'key': 'TagCount', 'type': 'int', 'xml': {'name': 'TagCount'}},
+ 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123', 'xml': {'name': 'Expiry-Time'}},
+ 'is_sealed': {'key': 'Sealed', 'type': 'bool', 'xml': {'name': 'Sealed'}},
+ 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str', 'xml': {'name': 'RehydratePriority'}},
+ 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123', 'xml': {'name': 'LastAccessTime'}},
+ }
+ _xml_map = {
+ 'name': 'Properties'
+ }
+
+ def __init__(self, **kwargs):
+ super(BlobPropertiesInternal, self).__init__(**kwargs)
+ self.creation_time = kwargs.get('creation_time', None)
+ self.last_modified = kwargs.get('last_modified', None)
+ self.etag = kwargs.get('etag', None)
+ self.content_length = kwargs.get('content_length', None)
+ self.content_type = kwargs.get('content_type', None)
+ self.content_encoding = kwargs.get('content_encoding', None)
+ self.content_language = kwargs.get('content_language', None)
+ self.content_md5 = kwargs.get('content_md5', None)
+ self.content_disposition = kwargs.get('content_disposition', None)
+ self.cache_control = kwargs.get('cache_control', None)
+ self.blob_sequence_number = kwargs.get('blob_sequence_number', None)
+ self.blob_type = kwargs.get('blob_type', None)
+ self.lease_status = kwargs.get('lease_status', None)
+ self.lease_state = kwargs.get('lease_state', None)
+ self.lease_duration = kwargs.get('lease_duration', None)
+ self.copy_id = kwargs.get('copy_id', None)
+ self.copy_status = kwargs.get('copy_status', None)
+ self.copy_source = kwargs.get('copy_source', None)
+ self.copy_progress = kwargs.get('copy_progress', None)
+ self.copy_completion_time = kwargs.get('copy_completion_time', None)
+ self.copy_status_description = kwargs.get('copy_status_description', None)
+ self.server_encrypted = kwargs.get('server_encrypted', None)
+ self.incremental_copy = kwargs.get('incremental_copy', None)
+ self.destination_snapshot = kwargs.get('destination_snapshot', None)
+ self.deleted_time = kwargs.get('deleted_time', None)
+ self.remaining_retention_days = kwargs.get('remaining_retention_days', None)
+ self.access_tier = kwargs.get('access_tier', None)
+ self.access_tier_inferred = kwargs.get('access_tier_inferred', None)
+ self.archive_status = kwargs.get('archive_status', None)
+ self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None)
+ self.encryption_scope = kwargs.get('encryption_scope', None)
+ self.access_tier_change_time = kwargs.get('access_tier_change_time', None)
+ self.tag_count = kwargs.get('tag_count', None)
+ self.expires_on = kwargs.get('expires_on', None)
+ self.is_sealed = kwargs.get('is_sealed', None)
+ self.rehydrate_priority = kwargs.get('rehydrate_priority', None)
+ self.last_accessed_on = kwargs.get('last_accessed_on', None)
+
+
+class BlobTag(Model):
+ """BlobTag.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param key: Required.
+ :type key: str
+ :param value: Required.
+ :type value: str
+ """
+
+ _validation = {
+ 'key': {'required': True},
+ 'value': {'required': True},
+ }
+
+ _attribute_map = {
+ 'key': {'key': 'Key', 'type': 'str', 'xml': {'name': 'Key'}},
+ 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}},
+ }
+ _xml_map = {
+ 'name': 'Tag'
+ }
+
+ def __init__(self, **kwargs):
+ super(BlobTag, self).__init__(**kwargs)
+ self.key = kwargs.get('key', None)
+ self.value = kwargs.get('value', None)
+
+
+class BlobTags(Model):
+ """Blob tags.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param blob_tag_set: Required.
+ :type blob_tag_set: list[~azure.storage.blob.models.BlobTag]
+ """
+
+ _validation = {
+ 'blob_tag_set': {'required': True},
+ }
+
+ _attribute_map = {
+ 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'itemsName': 'TagSet', 'wrapped': True}},
+ }
+ _xml_map = {
+ 'name': 'Tags'
+ }
+
+ def __init__(self, **kwargs):
+ super(BlobTags, self).__init__(**kwargs)
+ self.blob_tag_set = kwargs.get('blob_tag_set', None)
+
+
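+# Illustrative sketch only (not part of the generated code): how a tags payload is
+# assembled from the two models above. msrest serializes it per _attribute_map and
+# _xml_map into the wrapped Tags/TagSet XML document. The names and values below
+# are example data.
+def _example_blob_tags():
+    return BlobTags(blob_tag_set=[
+        BlobTag(key='project', value='demo'),
+        BlobTag(key='env', value='test'),
+    ])
+
+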
+class Block(Model):
+ """Represents a single block in a block blob. It describes the block's ID and
+ size.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required. The base64 encoded block ID.
+ :type name: str
+ :param size: Required. The block size in bytes.
+ :type size: int
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ 'size': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+ 'size': {'key': 'Size', 'type': 'int', 'xml': {'name': 'Size'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(Block, self).__init__(**kwargs)
+ self.name = kwargs.get('name', None)
+ self.size = kwargs.get('size', None)
+
+
+class BlockList(Model):
+ """BlockList.
+
+ :param committed_blocks:
+ :type committed_blocks: list[~azure.storage.blob.models.Block]
+ :param uncommitted_blocks:
+ :type uncommitted_blocks: list[~azure.storage.blob.models.Block]
+ """
+
+ _attribute_map = {
+ 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'name': 'CommittedBlocks', 'itemsName': 'Block', 'wrapped': True}},
+ 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'name': 'UncommittedBlocks', 'itemsName': 'Block', 'wrapped': True}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(BlockList, self).__init__(**kwargs)
+ self.committed_blocks = kwargs.get('committed_blocks', None)
+ self.uncommitted_blocks = kwargs.get('uncommitted_blocks', None)
+
+
+class BlockLookupList(Model):
+ """BlockLookupList.
+
+ :param committed:
+ :type committed: list[str]
+ :param uncommitted:
+ :type uncommitted: list[str]
+ :param latest:
+ :type latest: list[str]
+ """
+
+ _attribute_map = {
+ 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'name': 'Committed', 'itemsName': 'Committed'}},
+ 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'name': 'Uncommitted', 'itemsName': 'Uncommitted'}},
+ 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'name': 'Latest', 'itemsName': 'Latest'}},
+ }
+ _xml_map = {
+ 'name': 'BlockList'
+ }
+
+ def __init__(self, **kwargs):
+ super(BlockLookupList, self).__init__(**kwargs)
+ self.committed = kwargs.get('committed', None)
+ self.uncommitted = kwargs.get('uncommitted', None)
+ self.latest = kwargs.get('latest', None)
+
+
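+# Illustrative sketch only (not part of the generated code): a commit list for the
+# Put Block List operation built from this model. The block IDs are the base64
+# strings used when the blocks were staged; 'latest' commits whichever copy of
+# each block (committed or uncommitted) was uploaded most recently.
+def _example_block_lookup_list():
+    return BlockLookupList(latest=['YmxvY2stMDAwMQ==', 'YmxvY2stMDAwMg=='])
+
+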
+class ClearRange(Model):
+ """ClearRange.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param start: Required.
+ :type start: long
+ :param end: Required.
+ :type end: long
+ """
+
+ _validation = {
+ 'start': {'required': True},
+ 'end': {'required': True},
+ }
+
+ _attribute_map = {
+ 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
+ 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
+ }
+ _xml_map = {
+ 'name': 'ClearRange'
+ }
+
+ def __init__(self, **kwargs):
+ super(ClearRange, self).__init__(**kwargs)
+ self.start = kwargs.get('start', None)
+ self.end = kwargs.get('end', None)
+
+
+class ContainerCpkScopeInfo(Model):
+ """Additional parameters for create operation.
+
+ :param default_encryption_scope: Optional. Version 2019-07-07 and later.
+ Specifies the default encryption scope to set on the container and use for
+ all future writes.
+ :type default_encryption_scope: str
+ :param prevent_encryption_scope_override: Optional. Version 2019-07-07
+ and newer. If true, prevents any request from specifying a different
+ encryption scope than the scope set on the container.
+ :type prevent_encryption_scope_override: bool
+ """
+
+ _attribute_map = {
+ 'default_encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'default_encryption_scope'}},
+ 'prevent_encryption_scope_override': {'key': '', 'type': 'bool', 'xml': {'name': 'prevent_encryption_scope_override'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(ContainerCpkScopeInfo, self).__init__(**kwargs)
+ self.default_encryption_scope = kwargs.get('default_encryption_scope', None)
+ self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None)
+
+
+class ContainerItem(Model):
+ """An Azure Storage container.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required.
+ :type name: str
+ :param deleted:
+ :type deleted: bool
+ :param version:
+ :type version: str
+ :param properties: Required.
+ :type properties: ~azure.storage.blob.models.ContainerProperties
+ :param metadata:
+ :type metadata: dict[str, str]
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ 'properties': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+ 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}},
+ 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
+ 'properties': {'key': 'Properties', 'type': 'ContainerProperties', 'xml': {'name': 'Properties'}},
+ 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}},
+ }
+ _xml_map = {
+ 'name': 'Container'
+ }
+
+ def __init__(self, **kwargs):
+ super(ContainerItem, self).__init__(**kwargs)
+ self.name = kwargs.get('name', None)
+ self.deleted = kwargs.get('deleted', None)
+ self.version = kwargs.get('version', None)
+ self.properties = kwargs.get('properties', None)
+ self.metadata = kwargs.get('metadata', None)
+
+
+class ContainerProperties(Model):
+ """Properties of a container.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param last_modified: Required.
+ :type last_modified: datetime
+ :param etag: Required.
+ :type etag: str
+ :param lease_status: Possible values include: 'locked', 'unlocked'
+ :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType
+ :param lease_state: Possible values include: 'available', 'leased',
+ 'expired', 'breaking', 'broken'
+ :type lease_state: str or ~azure.storage.blob.models.LeaseStateType
+ :param lease_duration: Possible values include: 'infinite', 'fixed'
+ :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
+ :param public_access: Possible values include: 'container', 'blob'
+ :type public_access: str or ~azure.storage.blob.models.PublicAccessType
+ :param has_immutability_policy:
+ :type has_immutability_policy: bool
+ :param has_legal_hold:
+ :type has_legal_hold: bool
+ :param default_encryption_scope:
+ :type default_encryption_scope: str
+ :param prevent_encryption_scope_override:
+ :type prevent_encryption_scope_override: bool
+ :param deleted_time:
+ :type deleted_time: datetime
+ :param remaining_retention_days:
+ :type remaining_retention_days: int
+ """
+
+ _validation = {
+ 'last_modified': {'required': True},
+ 'etag': {'required': True},
+ }
+
+ _attribute_map = {
+ 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}},
+ 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}},
+ 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}},
+ 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}},
+ 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}},
+ 'public_access': {'key': 'PublicAccess', 'type': 'str', 'xml': {'name': 'PublicAccess'}},
+ 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool', 'xml': {'name': 'HasImmutabilityPolicy'}},
+ 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool', 'xml': {'name': 'HasLegalHold'}},
+ 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str', 'xml': {'name': 'DefaultEncryptionScope'}},
+ 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool', 'xml': {'name': 'DenyEncryptionScopeOverride'}},
+ 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}},
+ 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(ContainerProperties, self).__init__(**kwargs)
+ self.last_modified = kwargs.get('last_modified', None)
+ self.etag = kwargs.get('etag', None)
+ self.lease_status = kwargs.get('lease_status', None)
+ self.lease_state = kwargs.get('lease_state', None)
+ self.lease_duration = kwargs.get('lease_duration', None)
+ self.public_access = kwargs.get('public_access', None)
+ self.has_immutability_policy = kwargs.get('has_immutability_policy', None)
+ self.has_legal_hold = kwargs.get('has_legal_hold', None)
+ self.default_encryption_scope = kwargs.get('default_encryption_scope', None)
+ self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None)
+ self.deleted_time = kwargs.get('deleted_time', None)
+ self.remaining_retention_days = kwargs.get('remaining_retention_days', None)
+
+
+class CorsRule(Model):
+ """CORS is an HTTP feature that enables a web application running under one
+ domain to access resources in another domain. Web browsers implement a
+ security restriction known as same-origin policy that prevents a web page
+ from calling APIs in a different domain; CORS provides a secure way to
+ allow one domain (the origin domain) to call APIs in another domain.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param allowed_origins: Required. The origin domains that are permitted to
+ make a request against the storage service via CORS. The origin domain is
+ the domain from which the request originates. Note that the origin must be
+    an exact case-sensitive match with the origin that the user agent sends to
+ the service. You can also use the wildcard character '*' to allow all
+ origin domains to make requests via CORS.
+ :type allowed_origins: str
+ :param allowed_methods: Required. The methods (HTTP request verbs) that
+ the origin domain may use for a CORS request. (comma separated)
+ :type allowed_methods: str
+    :param allowed_headers: Required. The request headers that the origin
+ domain may specify on the CORS request.
+ :type allowed_headers: str
+ :param exposed_headers: Required. The response headers that may be sent in
+ the response to the CORS request and exposed by the browser to the request
+ issuer
+ :type exposed_headers: str
+    :param max_age_in_seconds: Required. The maximum amount of time that a
+ browser should cache the preflight OPTIONS request.
+ :type max_age_in_seconds: int
+ """
+
+ _validation = {
+ 'allowed_origins': {'required': True},
+ 'allowed_methods': {'required': True},
+ 'allowed_headers': {'required': True},
+ 'exposed_headers': {'required': True},
+ 'max_age_in_seconds': {'required': True, 'minimum': 0},
+ }
+
+ _attribute_map = {
+ 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}},
+ 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}},
+ 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}},
+ 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}},
+ 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(CorsRule, self).__init__(**kwargs)
+ self.allowed_origins = kwargs.get('allowed_origins', None)
+ self.allowed_methods = kwargs.get('allowed_methods', None)
+ self.allowed_headers = kwargs.get('allowed_headers', None)
+ self.exposed_headers = kwargs.get('exposed_headers', None)
+ self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None)
+
+
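+# Illustrative sketch only (not part of the generated code): one CORS rule as it
+# would be placed in StorageServiceProperties.cors. The origin and header values
+# below are example data.
+def _example_cors_rule():
+    return CorsRule(
+        allowed_origins='https://contoso.example',
+        allowed_methods='GET,PUT',
+        allowed_headers='x-ms-meta-*',
+        exposed_headers='x-ms-meta-*',
+        max_age_in_seconds=3600,
+    )
+
+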
+class CpkInfo(Model):
+ """Additional parameters for a set of operations.
+
+ :param encryption_key: Optional. Specifies the encryption key to use to
+ encrypt the data provided in the request. If not specified, encryption is
+ performed with the root account encryption key. For more information, see
+ Encryption at Rest for Azure Storage Services.
+ :type encryption_key: str
+ :param encryption_key_sha256: The SHA-256 hash of the provided encryption
+ key. Must be provided if the x-ms-encryption-key header is provided.
+ :type encryption_key_sha256: str
+ :param encryption_algorithm: The algorithm used to produce the encryption
+ key hash. Currently, the only accepted value is "AES256". Must be provided
+ if the x-ms-encryption-key header is provided. Possible values include:
+ 'AES256'
+ :type encryption_algorithm: str or
+ ~azure.storage.blob.models.EncryptionAlgorithmType
+ """
+
+ _attribute_map = {
+ 'encryption_key': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key'}},
+ 'encryption_key_sha256': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key_sha256'}},
+ 'encryption_algorithm': {'key': '', 'type': 'EncryptionAlgorithmType', 'xml': {'name': 'encryption_algorithm'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(CpkInfo, self).__init__(**kwargs)
+ self.encryption_key = kwargs.get('encryption_key', None)
+ self.encryption_key_sha256 = kwargs.get('encryption_key_sha256', None)
+ self.encryption_algorithm = kwargs.get('encryption_algorithm', None)
+
+
+class CpkScopeInfo(Model):
+ """Additional parameters for a set of operations.
+
+ :param encryption_scope: Optional. Version 2019-07-07 and later.
+ Specifies the name of the encryption scope to use to encrypt the data
+ provided in the request. If not specified, encryption is performed with
+ the default account encryption scope. For more information, see
+ Encryption at Rest for Azure Storage Services.
+ :type encryption_scope: str
+ """
+
+ _attribute_map = {
+ 'encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_scope'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(CpkScopeInfo, self).__init__(**kwargs)
+ self.encryption_scope = kwargs.get('encryption_scope', None)
+
+
+class DataLakeStorageError(Model):
+ """DataLakeStorageError.
+
+ :param data_lake_storage_error_details: The service error response object.
+ :type data_lake_storage_error_details:
+ ~azure.storage.blob.models.DataLakeStorageErrorError
+ """
+
+ _attribute_map = {
+ 'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorError', 'xml': {'name': 'error'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(DataLakeStorageError, self).__init__(**kwargs)
+ self.data_lake_storage_error_details = kwargs.get('data_lake_storage_error_details', None)
+
+
+class DataLakeStorageErrorException(HttpResponseError):
+    """Server responded with exception of type: 'DataLakeStorageError'.
+
+ :param deserialize: A deserializer
+ :param response: Server response to be deserialized.
+ """
+
+ def __init__(self, response, deserialize, *args):
+
+ model_name = 'DataLakeStorageError'
+ self.error = deserialize(model_name, response)
+ if self.error is None:
+ self.error = deserialize.dependencies[model_name]()
+ super(DataLakeStorageErrorException, self).__init__(response=response)
+
+
+class DataLakeStorageErrorError(Model):
+ """The service error response object.
+
+ :param code: The service error code.
+ :type code: str
+ :param message: The service error message.
+ :type message: str
+ """
+
+ _attribute_map = {
+ 'code': {'key': 'Code', 'type': 'str', 'xml': {'name': 'Code'}},
+ 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(DataLakeStorageErrorError, self).__init__(**kwargs)
+ self.code = kwargs.get('code', None)
+ self.message = kwargs.get('message', None)
+
+
+class DelimitedTextConfiguration(Model):
+ """delimited text configuration.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param column_separator: Required. column separator
+ :type column_separator: str
+ :param field_quote: Required. field quote
+ :type field_quote: str
+ :param record_separator: Required. record separator
+ :type record_separator: str
+ :param escape_char: Required. escape char
+ :type escape_char: str
+ :param headers_present: Required. has headers
+ :type headers_present: bool
+ """
+
+ _validation = {
+ 'column_separator': {'required': True},
+ 'field_quote': {'required': True},
+ 'record_separator': {'required': True},
+ 'escape_char': {'required': True},
+ 'headers_present': {'required': True},
+ }
+
+ _attribute_map = {
+ 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}},
+ 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}},
+ 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}},
+ 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}},
+ 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}},
+ }
+ _xml_map = {
+ 'name': 'DelimitedTextConfiguration'
+ }
+
+ def __init__(self, **kwargs):
+ super(DelimitedTextConfiguration, self).__init__(**kwargs)
+ self.column_separator = kwargs.get('column_separator', None)
+ self.field_quote = kwargs.get('field_quote', None)
+ self.record_separator = kwargs.get('record_separator', None)
+ self.escape_char = kwargs.get('escape_char', None)
+ self.headers_present = kwargs.get('headers_present', None)
+
+
+class DirectoryHttpHeaders(Model):
+ """Additional parameters for a set of operations, such as: Directory_create,
+ Directory_rename, Blob_rename.
+
+ :param cache_control: Cache control for given resource
+ :type cache_control: str
+ :param content_type: Content type for given resource
+ :type content_type: str
+ :param content_encoding: Content encoding for given resource
+ :type content_encoding: str
+ :param content_language: Content language for given resource
+ :type content_language: str
+ :param content_disposition: Content disposition for given resource
+ :type content_disposition: str
+ """
+
+ _attribute_map = {
+ 'cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'cache_control'}},
+ 'content_type': {'key': '', 'type': 'str', 'xml': {'name': 'content_type'}},
+ 'content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'content_encoding'}},
+ 'content_language': {'key': '', 'type': 'str', 'xml': {'name': 'content_language'}},
+ 'content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'content_disposition'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(DirectoryHttpHeaders, self).__init__(**kwargs)
+ self.cache_control = kwargs.get('cache_control', None)
+ self.content_type = kwargs.get('content_type', None)
+ self.content_encoding = kwargs.get('content_encoding', None)
+ self.content_language = kwargs.get('content_language', None)
+ self.content_disposition = kwargs.get('content_disposition', None)
+
+
+class FilterBlobItem(Model):
+ """Blob info from a Filter Blobs API call.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required.
+ :type name: str
+ :param container_name: Required.
+ :type container_name: str
+ :param tag_value: Required.
+ :type tag_value: str
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ 'container_name': {'required': True},
+ 'tag_value': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+ 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName'}},
+ 'tag_value': {'key': 'TagValue', 'type': 'str', 'xml': {'name': 'TagValue'}},
+ }
+ _xml_map = {
+ 'name': 'Blob'
+ }
+
+ def __init__(self, **kwargs):
+ super(FilterBlobItem, self).__init__(**kwargs)
+ self.name = kwargs.get('name', None)
+ self.container_name = kwargs.get('container_name', None)
+ self.tag_value = kwargs.get('tag_value', None)
+
+
+class FilterBlobSegment(Model):
+ """The result of a Filter Blobs API call.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param service_endpoint: Required.
+ :type service_endpoint: str
+ :param where: Required.
+ :type where: str
+ :param blobs: Required.
+ :type blobs: list[~azure.storage.blob.models.FilterBlobItem]
+ :param next_marker:
+ :type next_marker: str
+ """
+
+ _validation = {
+ 'service_endpoint': {'required': True},
+ 'where': {'required': True},
+ 'blobs': {'required': True},
+ }
+
+ _attribute_map = {
+ 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
+ 'where': {'key': 'Where', 'type': 'str', 'xml': {'name': 'Where'}},
+ 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'itemsName': 'Blobs', 'wrapped': True}},
+ 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
+ }
+ _xml_map = {
+ 'name': 'EnumerationResults'
+ }
+
+ def __init__(self, **kwargs):
+ super(FilterBlobSegment, self).__init__(**kwargs)
+ self.service_endpoint = kwargs.get('service_endpoint', None)
+ self.where = kwargs.get('where', None)
+ self.blobs = kwargs.get('blobs', None)
+ self.next_marker = kwargs.get('next_marker', None)
+
+
+class GeoReplication(Model):
+ """Geo-Replication information for the Secondary Storage Service.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param status: Required. The status of the secondary location. Possible
+ values include: 'live', 'bootstrap', 'unavailable'
+ :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType
+ :param last_sync_time: Required. A GMT date/time value, to the second. All
+ primary writes preceding this value are guaranteed to be available for
+ read operations at the secondary. Primary writes after this point in time
+ may or may not be available for reads.
+ :type last_sync_time: datetime
+ """
+
+ _validation = {
+ 'status': {'required': True},
+ 'last_sync_time': {'required': True},
+ }
+
+ _attribute_map = {
+ 'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}},
+ 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(GeoReplication, self).__init__(**kwargs)
+ self.status = kwargs.get('status', None)
+ self.last_sync_time = kwargs.get('last_sync_time', None)
+
+
+class JsonTextConfiguration(Model):
+ """json text configuration.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param record_separator: Required. record separator
+ :type record_separator: str
+ """
+
+ _validation = {
+ 'record_separator': {'required': True},
+ }
+
+ _attribute_map = {
+ 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}},
+ }
+ _xml_map = {
+ 'name': 'JsonTextConfiguration'
+ }
+
+ def __init__(self, **kwargs):
+ super(JsonTextConfiguration, self).__init__(**kwargs)
+ self.record_separator = kwargs.get('record_separator', None)
+
+
+class KeyInfo(Model):
+ """Key information.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param start: Required. The date-time the key is active in ISO 8601 UTC
+ time
+ :type start: str
+ :param expiry: Required. The date-time the key expires in ISO 8601 UTC
+ time
+ :type expiry: str
+ """
+
+ _validation = {
+ 'start': {'required': True},
+ 'expiry': {'required': True},
+ }
+
+ _attribute_map = {
+ 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
+ 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(KeyInfo, self).__init__(**kwargs)
+ self.start = kwargs.get('start', None)
+ self.expiry = kwargs.get('expiry', None)
+
+
+class LeaseAccessConditions(Model):
+ """Additional parameters for a set of operations.
+
+ :param lease_id: If specified, the operation only succeeds if the
+ resource's lease is active and matches this ID.
+ :type lease_id: str
+ """
+
+ _attribute_map = {
+ 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(LeaseAccessConditions, self).__init__(**kwargs)
+ self.lease_id = kwargs.get('lease_id', None)
+
+
+class ListBlobsFlatSegmentResponse(Model):
+ """An enumeration of blobs.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param service_endpoint: Required.
+ :type service_endpoint: str
+ :param container_name: Required.
+ :type container_name: str
+ :param prefix:
+ :type prefix: str
+ :param marker:
+ :type marker: str
+ :param max_results:
+ :type max_results: int
+ :param segment: Required.
+ :type segment: ~azure.storage.blob.models.BlobFlatListSegment
+ :param next_marker:
+ :type next_marker: str
+ """
+
+ _validation = {
+ 'service_endpoint': {'required': True},
+ 'container_name': {'required': True},
+ 'segment': {'required': True},
+ }
+
+ _attribute_map = {
+ 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
+ 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}},
+ 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
+ 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
+ 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
+ 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment', 'xml': {'name': 'Segment'}},
+ 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
+ }
+ _xml_map = {
+ 'name': 'EnumerationResults'
+ }
+
+ def __init__(self, **kwargs):
+ super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs)
+ self.service_endpoint = kwargs.get('service_endpoint', None)
+ self.container_name = kwargs.get('container_name', None)
+ self.prefix = kwargs.get('prefix', None)
+ self.marker = kwargs.get('marker', None)
+ self.max_results = kwargs.get('max_results', None)
+ self.segment = kwargs.get('segment', None)
+ self.next_marker = kwargs.get('next_marker', None)
+
+
+class ListBlobsHierarchySegmentResponse(Model):
+ """An enumeration of blobs.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param service_endpoint: Required.
+ :type service_endpoint: str
+ :param container_name: Required.
+ :type container_name: str
+ :param prefix:
+ :type prefix: str
+ :param marker:
+ :type marker: str
+ :param max_results:
+ :type max_results: int
+ :param delimiter:
+ :type delimiter: str
+ :param segment: Required.
+ :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment
+ :param next_marker:
+ :type next_marker: str
+ """
+
+ _validation = {
+ 'service_endpoint': {'required': True},
+ 'container_name': {'required': True},
+ 'segment': {'required': True},
+ }
+
+ _attribute_map = {
+ 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
+ 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}},
+ 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
+ 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
+ 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
+ 'delimiter': {'key': 'Delimiter', 'type': 'str', 'xml': {'name': 'Delimiter'}},
+ 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment', 'xml': {'name': 'Segment'}},
+ 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
+ }
+ _xml_map = {
+ 'name': 'EnumerationResults'
+ }
+
+ def __init__(self, **kwargs):
+ super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs)
+ self.service_endpoint = kwargs.get('service_endpoint', None)
+ self.container_name = kwargs.get('container_name', None)
+ self.prefix = kwargs.get('prefix', None)
+ self.marker = kwargs.get('marker', None)
+ self.max_results = kwargs.get('max_results', None)
+ self.delimiter = kwargs.get('delimiter', None)
+ self.segment = kwargs.get('segment', None)
+ self.next_marker = kwargs.get('next_marker', None)
+
+
+class ListContainersSegmentResponse(Model):
+ """An enumeration of containers.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param service_endpoint: Required.
+ :type service_endpoint: str
+ :param prefix:
+ :type prefix: str
+ :param marker:
+ :type marker: str
+ :param max_results:
+ :type max_results: int
+ :param container_items: Required.
+ :type container_items: list[~azure.storage.blob.models.ContainerItem]
+ :param next_marker:
+ :type next_marker: str
+ """
+
+ _validation = {
+ 'service_endpoint': {'required': True},
+ 'container_items': {'required': True},
+ }
+
+ _attribute_map = {
+ 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
+ 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
+ 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
+ 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
+ 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'itemsName': 'Containers', 'wrapped': True}},
+ 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
+ }
+ _xml_map = {
+ 'name': 'EnumerationResults'
+ }
+
+ def __init__(self, **kwargs):
+ super(ListContainersSegmentResponse, self).__init__(**kwargs)
+ self.service_endpoint = kwargs.get('service_endpoint', None)
+ self.prefix = kwargs.get('prefix', None)
+ self.marker = kwargs.get('marker', None)
+ self.max_results = kwargs.get('max_results', None)
+ self.container_items = kwargs.get('container_items', None)
+ self.next_marker = kwargs.get('next_marker', None)
+
+
+class Logging(Model):
+ """Azure Analytics Logging settings.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param version: Required. The version of Storage Analytics to configure.
+ :type version: str
+ :param delete: Required. Indicates whether all delete requests should be
+ logged.
+ :type delete: bool
+ :param read: Required. Indicates whether all read requests should be
+ logged.
+ :type read: bool
+ :param write: Required. Indicates whether all write requests should be
+ logged.
+ :type write: bool
+ :param retention_policy: Required.
+ :type retention_policy: ~azure.storage.blob.models.RetentionPolicy
+ """
+
+ _validation = {
+ 'version': {'required': True},
+ 'delete': {'required': True},
+ 'read': {'required': True},
+ 'write': {'required': True},
+ 'retention_policy': {'required': True},
+ }
+
+ _attribute_map = {
+ 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
+ 'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}},
+ 'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}},
+ 'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}},
+ 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(Logging, self).__init__(**kwargs)
+ self.version = kwargs.get('version', None)
+ self.delete = kwargs.get('delete', None)
+ self.read = kwargs.get('read', None)
+ self.write = kwargs.get('write', None)
+ self.retention_policy = kwargs.get('retention_policy', None)
+
+
+class Metrics(Model):
+ """a summary of request statistics grouped by API in hour or minute aggregates
+ for blobs.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param version: The version of Storage Analytics to configure.
+ :type version: str
+ :param enabled: Required. Indicates whether metrics are enabled for the
+ Blob service.
+ :type enabled: bool
+ :param include_apis: Indicates whether metrics should generate summary
+ statistics for called API operations.
+ :type include_apis: bool
+ :param retention_policy:
+ :type retention_policy: ~azure.storage.blob.models.RetentionPolicy
+ """
+
+ _validation = {
+ 'enabled': {'required': True},
+ }
+
+ _attribute_map = {
+ 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
+ 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
+ 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}},
+ 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(Metrics, self).__init__(**kwargs)
+ self.version = kwargs.get('version', None)
+ self.enabled = kwargs.get('enabled', None)
+ self.include_apis = kwargs.get('include_apis', None)
+ self.retention_policy = kwargs.get('retention_policy', None)
+
+
+class ModifiedAccessConditions(Model):
+ """Additional parameters for a set of operations.
+
+ :param if_modified_since: Specify this header value to operate only on a
+ blob if it has been modified since the specified date/time.
+ :type if_modified_since: datetime
+ :param if_unmodified_since: Specify this header value to operate only on a
+ blob if it has not been modified since the specified date/time.
+ :type if_unmodified_since: datetime
+ :param if_match: Specify an ETag value to operate only on blobs with a
+ matching value.
+ :type if_match: str
+ :param if_none_match: Specify an ETag value to operate only on blobs
+ without a matching value.
+ :type if_none_match: str
+ :param if_tags: Specify a SQL where clause on blob tags to operate only on
+ blobs with a matching value.
+ :type if_tags: str
+ """
+
+ _attribute_map = {
+ 'if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_modified_since'}},
+ 'if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_unmodified_since'}},
+ 'if_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_match'}},
+ 'if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_none_match'}},
+ 'if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'if_tags'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(ModifiedAccessConditions, self).__init__(**kwargs)
+ self.if_modified_since = kwargs.get('if_modified_since', None)
+ self.if_unmodified_since = kwargs.get('if_unmodified_since', None)
+ self.if_match = kwargs.get('if_match', None)
+ self.if_none_match = kwargs.get('if_none_match', None)
+ self.if_tags = kwargs.get('if_tags', None)
+
+
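+# Illustrative sketch only (not part of the generated code): the grouped
+# conditional headers the generated operations accept. if_none_match='*' expresses
+# "only if the blob does not already exist"; if_match pins an update to a specific
+# ETag instead.
+def _example_create_if_not_exists_conditions():
+    return ModifiedAccessConditions(if_none_match='*')
+
+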
+class PageList(Model):
+ """the list of pages.
+
+ :param page_range:
+ :type page_range: list[~azure.storage.blob.models.PageRange]
+ :param clear_range:
+ :type clear_range: list[~azure.storage.blob.models.ClearRange]
+ """
+
+ _attribute_map = {
+ 'page_range': {'key': 'PageRange', 'type': '[PageRange]', 'xml': {'name': 'PageRange', 'itemsName': 'PageRange'}},
+ 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]', 'xml': {'name': 'ClearRange', 'itemsName': 'ClearRange'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(PageList, self).__init__(**kwargs)
+ self.page_range = kwargs.get('page_range', None)
+ self.clear_range = kwargs.get('clear_range', None)
+
+
+class PageRange(Model):
+ """PageRange.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param start: Required.
+ :type start: long
+ :param end: Required.
+ :type end: long
+ """
+
+ _validation = {
+ 'start': {'required': True},
+ 'end': {'required': True},
+ }
+
+ _attribute_map = {
+ 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
+ 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
+ }
+ _xml_map = {
+ 'name': 'PageRange'
+ }
+
+ def __init__(self, **kwargs):
+ super(PageRange, self).__init__(**kwargs)
+ self.start = kwargs.get('start', None)
+ self.end = kwargs.get('end', None)
+
+
+class QueryFormat(Model):
+ """QueryFormat.
+
+ :param type: Possible values include: 'delimited', 'json', 'arrow'
+ :type type: str or ~azure.storage.blob.models.QueryFormatType
+ :param delimited_text_configuration:
+ :type delimited_text_configuration:
+ ~azure.storage.blob.models.DelimitedTextConfiguration
+ :param json_text_configuration:
+ :type json_text_configuration:
+ ~azure.storage.blob.models.JsonTextConfiguration
+ :param arrow_configuration:
+ :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration
+ """
+
+ _attribute_map = {
+ 'type': {'key': 'Type', 'type': 'QueryFormatType', 'xml': {'name': 'Type'}},
+ 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration', 'xml': {'name': 'DelimitedTextConfiguration'}},
+ 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration', 'xml': {'name': 'JsonTextConfiguration'}},
+ 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration', 'xml': {'name': 'ArrowConfiguration'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(QueryFormat, self).__init__(**kwargs)
+ self.type = kwargs.get('type', None)
+ self.delimited_text_configuration = kwargs.get('delimited_text_configuration', None)
+ self.json_text_configuration = kwargs.get('json_text_configuration', None)
+ self.arrow_configuration = kwargs.get('arrow_configuration', None)
+
+
+class QueryRequest(Model):
+ """the quick query body.
+
+ Variables are only populated by the server, and will be ignored when
+ sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar query_type: Required. the query type. Default value: "SQL" .
+ :vartype query_type: str
+ :param expression: Required. a query statement
+ :type expression: str
+ :param input_serialization:
+ :type input_serialization: ~azure.storage.blob.models.QuerySerialization
+ :param output_serialization:
+ :type output_serialization: ~azure.storage.blob.models.QuerySerialization
+ """
+
+ _validation = {
+ 'query_type': {'required': True, 'constant': True},
+ 'expression': {'required': True},
+ }
+
+ _attribute_map = {
+ 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}},
+ 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}},
+ 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'InputSerialization'}},
+ 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'OutputSerialization'}},
+ }
+ _xml_map = {
+ 'name': 'QueryRequest'
+ }
+
+ query_type = "SQL"
+
+ def __init__(self, **kwargs):
+ super(QueryRequest, self).__init__(**kwargs)
+ self.expression = kwargs.get('expression', None)
+ self.input_serialization = kwargs.get('input_serialization', None)
+ self.output_serialization = kwargs.get('output_serialization', None)
+
+
+class QuerySerialization(Model):
+ """QuerySerialization.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param format: Required.
+ :type format: ~azure.storage.blob.models.QueryFormat
+ """
+
+ _validation = {
+ 'format': {'required': True},
+ }
+
+ _attribute_map = {
+ 'format': {'key': 'Format', 'type': 'QueryFormat', 'xml': {'name': 'Format'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(QuerySerialization, self).__init__(**kwargs)
+ self.format = kwargs.get('format', None)
+
+
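+# Illustrative sketch only (not part of the generated code): composing the quick
+# query body from the models above. Both input and output use the same
+# delimited-text serialization here, and the SQL expression is example data.
+def _example_query_request():
+    csv = QuerySerialization(format=QueryFormat(
+        type='delimited',
+        delimited_text_configuration=DelimitedTextConfiguration(
+            column_separator=',',
+            field_quote='"',
+            record_separator='\n',
+            escape_char='',
+            headers_present=True,
+        ),
+    ))
+    return QueryRequest(
+        expression='SELECT * FROM BlobStorage',
+        input_serialization=csv,
+        output_serialization=csv,
+    )
+
+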
+class RetentionPolicy(Model):
+ """the retention policy which determines how long the associated data should
+ persist.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param enabled: Required. Indicates whether a retention policy is enabled
+ for the storage service
+ :type enabled: bool
+ :param days: Indicates the number of days that metrics or logging or
+ soft-deleted data should be retained. All data older than this value will
+ be deleted
+ :type days: int
+ """
+
+ _validation = {
+ 'enabled': {'required': True},
+ 'days': {'minimum': 1},
+ }
+
+ _attribute_map = {
+ 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
+ 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(RetentionPolicy, self).__init__(**kwargs)
+ self.enabled = kwargs.get('enabled', None)
+ self.days = kwargs.get('days', None)
+
+
+class SequenceNumberAccessConditions(Model):
+ """Additional parameters for a set of operations, such as:
+ PageBlob_upload_pages, PageBlob_clear_pages,
+ PageBlob_upload_pages_from_url.
+
+ :param if_sequence_number_less_than_or_equal_to: Specify this header value
+ to operate only on a blob if it has a sequence number less than or equal
+    to the specified value.
+ :type if_sequence_number_less_than_or_equal_to: long
+ :param if_sequence_number_less_than: Specify this header value to operate
+    only on a blob if it has a sequence number less than the specified value.
+ :type if_sequence_number_less_than: long
+ :param if_sequence_number_equal_to: Specify this header value to operate
+ only on a blob if it has the specified sequence number.
+ :type if_sequence_number_equal_to: long
+ """
+
+ _attribute_map = {
+ 'if_sequence_number_less_than_or_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than_or_equal_to'}},
+ 'if_sequence_number_less_than': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than'}},
+ 'if_sequence_number_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_equal_to'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(SequenceNumberAccessConditions, self).__init__(**kwargs)
+ self.if_sequence_number_less_than_or_equal_to = kwargs.get('if_sequence_number_less_than_or_equal_to', None)
+ self.if_sequence_number_less_than = kwargs.get('if_sequence_number_less_than', None)
+ self.if_sequence_number_equal_to = kwargs.get('if_sequence_number_equal_to', None)
+
+
+class SignedIdentifier(Model):
+ """signed identifier.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. a unique id
+ :type id: str
+ :param access_policy:
+ :type access_policy: ~azure.storage.blob.models.AccessPolicy
+ """
+
+ _validation = {
+ 'id': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}},
+ 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}},
+ }
+ _xml_map = {
+ 'name': 'SignedIdentifier'
+ }
+
+ def __init__(self, **kwargs):
+ super(SignedIdentifier, self).__init__(**kwargs)
+ self.id = kwargs.get('id', None)
+ self.access_policy = kwargs.get('access_policy', None)
+
+
+class SourceModifiedAccessConditions(Model):
+ """Additional parameters for a set of operations.
+
+ :param source_if_modified_since: Specify this header value to operate only
+ on a blob if it has been modified since the specified date/time.
+ :type source_if_modified_since: datetime
+ :param source_if_unmodified_since: Specify this header value to operate
+ only on a blob if it has not been modified since the specified date/time.
+ :type source_if_unmodified_since: datetime
+ :param source_if_match: Specify an ETag value to operate only on blobs
+ with a matching value.
+ :type source_if_match: str
+ :param source_if_none_match: Specify an ETag value to operate only on
+ blobs without a matching value.
+ :type source_if_none_match: str
+ :param source_if_tags: Specify a SQL where clause on blob tags to operate
+ only on blobs with a matching value.
+ :type source_if_tags: str
+ """
+
+ _attribute_map = {
+ 'source_if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_modified_since'}},
+ 'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_unmodified_since'}},
+ 'source_if_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_match'}},
+ 'source_if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_none_match'}},
+ 'source_if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_tags'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(SourceModifiedAccessConditions, self).__init__(**kwargs)
+ self.source_if_modified_since = kwargs.get('source_if_modified_since', None)
+ self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None)
+ self.source_if_match = kwargs.get('source_if_match', None)
+ self.source_if_none_match = kwargs.get('source_if_none_match', None)
+ self.source_if_tags = kwargs.get('source_if_tags', None)
+
+
+class StaticWebsite(Model):
+ """The properties that enable an account to host a static website.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param enabled: Required. Indicates whether this account is hosting a
+ static website
+ :type enabled: bool
+ :param index_document: The default name of the index page under each
+ directory
+ :type index_document: str
+ :param error_document404_path: The absolute path of the custom 404 page
+ :type error_document404_path: str
+ :param default_index_document_path: Absolute path of the default index
+ page
+ :type default_index_document_path: str
+ """
+
+ _validation = {
+ 'enabled': {'required': True},
+ }
+
+ _attribute_map = {
+ 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
+ 'index_document': {'key': 'IndexDocument', 'type': 'str', 'xml': {'name': 'IndexDocument'}},
+ 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str', 'xml': {'name': 'ErrorDocument404Path'}},
+ 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str', 'xml': {'name': 'DefaultIndexDocumentPath'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(StaticWebsite, self).__init__(**kwargs)
+ self.enabled = kwargs.get('enabled', None)
+ self.index_document = kwargs.get('index_document', None)
+ self.error_document404_path = kwargs.get('error_document404_path', None)
+ self.default_index_document_path = kwargs.get('default_index_document_path', None)
+
+
+class StorageError(Model):
+ """StorageError.
+
+ :param message:
+ :type message: str
+ """
+
+ _attribute_map = {
+ 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(StorageError, self).__init__(**kwargs)
+ self.message = kwargs.get('message', None)
+
+
+class StorageErrorException(HttpResponseError):
+    """Server responded with exception of type: 'StorageError'.
+
+ :param deserialize: A deserializer
+ :param response: Server response to be deserialized.
+ """
+
+ def __init__(self, response, deserialize, *args):
+
+ model_name = 'StorageError'
+ self.error = deserialize(model_name, response)
+ if self.error is None:
+ self.error = deserialize.dependencies[model_name]()
+ super(StorageErrorException, self).__init__(response=response)
+
+
+class StorageServiceProperties(Model):
+ """Storage Service Properties.
+
+ :param logging:
+ :type logging: ~azure.storage.blob.models.Logging
+ :param hour_metrics:
+ :type hour_metrics: ~azure.storage.blob.models.Metrics
+ :param minute_metrics:
+ :type minute_metrics: ~azure.storage.blob.models.Metrics
+ :param cors: The set of CORS rules.
+ :type cors: list[~azure.storage.blob.models.CorsRule]
+ :param default_service_version: The default version to use for requests to
+ the Blob service if an incoming request's version is not specified.
+ Possible values include version 2008-10-27 and all more recent versions
+ :type default_service_version: str
+ :param delete_retention_policy:
+ :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy
+ :param static_website:
+ :type static_website: ~azure.storage.blob.models.StaticWebsite
+ """
+
+ _attribute_map = {
+ 'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}},
+ 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
+ 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
+ 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
+ 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str', 'xml': {'name': 'DefaultServiceVersion'}},
+ 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'DeleteRetentionPolicy'}},
+ 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite', 'xml': {'name': 'StaticWebsite'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(StorageServiceProperties, self).__init__(**kwargs)
+ self.logging = kwargs.get('logging', None)
+ self.hour_metrics = kwargs.get('hour_metrics', None)
+ self.minute_metrics = kwargs.get('minute_metrics', None)
+ self.cors = kwargs.get('cors', None)
+ self.default_service_version = kwargs.get('default_service_version', None)
+ self.delete_retention_policy = kwargs.get('delete_retention_policy', None)
+ self.static_website = kwargs.get('static_website', None)
+
+
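+# Illustrative sketch only (not part of the generated code): a full
+# service-properties payload combining the models above, including the delete
+# retention policy used for blob soft delete. Versions and day counts are
+# example values.
+def _example_service_properties():
+    week = RetentionPolicy(enabled=True, days=7)
+    return StorageServiceProperties(
+        logging=Logging(version='1.0', delete=True, read=True, write=True,
+                        retention_policy=week),
+        hour_metrics=Metrics(version='1.0', enabled=True, include_apis=True,
+                             retention_policy=week),
+        minute_metrics=Metrics(version='1.0', enabled=False,
+                               retention_policy=RetentionPolicy(enabled=False)),
+        delete_retention_policy=RetentionPolicy(enabled=True, days=5),
+        static_website=StaticWebsite(enabled=False),
+    )
+
+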
+class StorageServiceStats(Model):
+ """Stats for the storage service.
+
+ :param geo_replication:
+ :type geo_replication: ~azure.storage.blob.models.GeoReplication
+ """
+
+ _attribute_map = {
+ 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(StorageServiceStats, self).__init__(**kwargs)
+ self.geo_replication = kwargs.get('geo_replication', None)
+
+
+class UserDelegationKey(Model):
+ """A user delegation key.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param signed_oid: Required. The Azure Active Directory object ID in GUID
+ format.
+ :type signed_oid: str
+ :param signed_tid: Required. The Azure Active Directory tenant ID in GUID
+ format
+ :type signed_tid: str
+ :param signed_start: Required. The date-time the key is active
+ :type signed_start: datetime
+ :param signed_expiry: Required. The date-time the key expires
+ :type signed_expiry: datetime
+ :param signed_service: Required. Abbreviation of the Azure Storage service
+ that accepts the key
+ :type signed_service: str
+ :param signed_version: Required. The service version that created the key
+ :type signed_version: str
+ :param value: Required. The key as a base64 string
+ :type value: str
+ """
+
+ _validation = {
+ 'signed_oid': {'required': True},
+ 'signed_tid': {'required': True},
+ 'signed_start': {'required': True},
+ 'signed_expiry': {'required': True},
+ 'signed_service': {'required': True},
+ 'signed_version': {'required': True},
+ 'value': {'required': True},
+ }
+
+ _attribute_map = {
+ 'signed_oid': {'key': 'SignedOid', 'type': 'str', 'xml': {'name': 'SignedOid'}},
+ 'signed_tid': {'key': 'SignedTid', 'type': 'str', 'xml': {'name': 'SignedTid'}},
+ 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601', 'xml': {'name': 'SignedStart'}},
+ 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601', 'xml': {'name': 'SignedExpiry'}},
+ 'signed_service': {'key': 'SignedService', 'type': 'str', 'xml': {'name': 'SignedService'}},
+ 'signed_version': {'key': 'SignedVersion', 'type': 'str', 'xml': {'name': 'SignedVersion'}},
+ 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, **kwargs):
+ super(UserDelegationKey, self).__init__(**kwargs)
+ self.signed_oid = kwargs.get('signed_oid', None)
+ self.signed_tid = kwargs.get('signed_tid', None)
+ self.signed_start = kwargs.get('signed_start', None)
+ self.signed_expiry = kwargs.get('signed_expiry', None)
+ self.signed_service = kwargs.get('signed_service', None)
+ self.signed_version = kwargs.get('signed_version', None)
+ self.value = kwargs.get('value', None)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/models/_models_py3.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/models/_models_py3.py
new file mode 100644
index 00000000000..7e5a3fc9136
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/models/_models_py3.py
@@ -0,0 +1,2009 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+from azure.core.exceptions import HttpResponseError
+
+
+class AccessPolicy(Model):
+ """An Access policy.
+
+ :param start: the date-time the policy is active
+ :type start: str
+ :param expiry: the date-time the policy expires
+ :type expiry: str
+ :param permission: the permissions for the acl policy
+ :type permission: str
+ """
+
+ _attribute_map = {
+ 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
+ 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
+ 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, start: str=None, expiry: str=None, permission: str=None, **kwargs) -> None:
+ super(AccessPolicy, self).__init__(**kwargs)
+ self.start = start
+ self.expiry = expiry
+ self.permission = permission
+
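+# Editorial sketch (not AutoRest output; would be lost on regeneration): the
+# keyword-only constructor above takes plain ISO 8601 strings, e.g. when building
+# a stored access policy for a container ACL. All values below are placeholders.
+_example_access_policy = AccessPolicy(
+    start='2021-05-20T00:00:00Z',    # date-time the policy becomes active
+    expiry='2021-05-21T00:00:00Z',   # date-time the policy expires
+    permission='rl',                 # permission string, e.g. read + list
+)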
+
+class AppendPositionAccessConditions(Model):
+ """Additional parameters for a set of operations, such as:
+ AppendBlob_append_block, AppendBlob_append_block_from_url, AppendBlob_seal.
+
+ :param max_size: Optional conditional header. The max length in bytes
+ permitted for the append blob. If the Append Block operation would cause
+ the blob to exceed that limit or if the blob size is already greater than
+ the value specified in this header, the request will fail with
+ MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition
+ Failed).
+ :type max_size: long
+ :param append_position: Optional conditional header, used only for the
+ Append Block operation. A number indicating the byte offset to compare.
+ Append Block will succeed only if the append position is equal to this
+ number. If it is not, the request will fail with the
+ AppendPositionConditionNotMet error (HTTP status code 412 - Precondition
+ Failed).
+ :type append_position: long
+ """
+
+ _attribute_map = {
+ 'max_size': {'key': '', 'type': 'long', 'xml': {'name': 'max_size'}},
+ 'append_position': {'key': '', 'type': 'long', 'xml': {'name': 'append_position'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, max_size: int=None, append_position: int=None, **kwargs) -> None:
+ super(AppendPositionAccessConditions, self).__init__(**kwargs)
+ self.max_size = max_size
+ self.append_position = append_position
+
+
+class ArrowConfiguration(Model):
+ """arrow configuration.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param schema: Required.
+ :type schema: list[~azure.storage.blob.models.ArrowField]
+ """
+
+ _validation = {
+ 'schema': {'required': True},
+ }
+
+ _attribute_map = {
+ 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'itemsName': 'Schema', 'wrapped': True}},
+ }
+ _xml_map = {
+ 'name': 'ArrowConfiguration'
+ }
+
+ def __init__(self, *, schema, **kwargs) -> None:
+ super(ArrowConfiguration, self).__init__(**kwargs)
+ self.schema = schema
+
+
+class ArrowField(Model):
+ """field of an arrow schema.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param type: Required.
+ :type type: str
+ :param name:
+ :type name: str
+ :param precision:
+ :type precision: int
+ :param scale:
+ :type scale: int
+ """
+
+ _validation = {
+ 'type': {'required': True},
+ }
+
+ _attribute_map = {
+ 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}},
+ 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+ 'precision': {'key': 'Precision', 'type': 'int', 'xml': {'name': 'Precision'}},
+ 'scale': {'key': 'Scale', 'type': 'int', 'xml': {'name': 'Scale'}},
+ }
+ _xml_map = {
+ 'name': 'Field'
+ }
+
+ def __init__(self, *, type: str, name: str=None, precision: int=None, scale: int=None, **kwargs) -> None:
+ super(ArrowField, self).__init__(**kwargs)
+ self.type = type
+ self.name = name
+ self.precision = precision
+ self.scale = scale
+
+
+class BlobFlatListSegment(Model):
+ """BlobFlatListSegment.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param blob_items: Required.
+ :type blob_items: list[~azure.storage.blob.models.BlobItemInternal]
+ """
+
+ _validation = {
+ 'blob_items': {'required': True},
+ }
+
+ _attribute_map = {
+ 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}},
+ }
+ _xml_map = {
+ 'name': 'Blobs'
+ }
+
+ def __init__(self, *, blob_items, **kwargs) -> None:
+ super(BlobFlatListSegment, self).__init__(**kwargs)
+ self.blob_items = blob_items
+
+
+class BlobHierarchyListSegment(Model):
+ """BlobHierarchyListSegment.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param blob_prefixes:
+ :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix]
+ :param blob_items: Required.
+ :type blob_items: list[~azure.storage.blob.models.BlobItemInternal]
+ """
+
+ _validation = {
+ 'blob_items': {'required': True},
+ }
+
+ _attribute_map = {
+ 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix', 'itemsName': 'BlobPrefix'}},
+ 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}},
+ }
+ _xml_map = {
+ 'name': 'Blobs'
+ }
+
+ def __init__(self, *, blob_items, blob_prefixes=None, **kwargs) -> None:
+ super(BlobHierarchyListSegment, self).__init__(**kwargs)
+ self.blob_prefixes = blob_prefixes
+ self.blob_items = blob_items
+
+
+class BlobHTTPHeaders(Model):
+ """Additional parameters for a set of operations.
+
+ :param blob_cache_control: Optional. Sets the blob's cache control. If
+ specified, this property is stored with the blob and returned with a read
+ request.
+ :type blob_cache_control: str
+ :param blob_content_type: Optional. Sets the blob's content type. If
+ specified, this property is stored with the blob and returned with a read
+ request.
+ :type blob_content_type: str
+ :param blob_content_md5: Optional. An MD5 hash of the blob content. Note
+ that this hash is not validated, as the hashes for the individual blocks
+ were validated when each was uploaded.
+ :type blob_content_md5: bytearray
+ :param blob_content_encoding: Optional. Sets the blob's content encoding.
+ If specified, this property is stored with the blob and returned with a
+ read request.
+ :type blob_content_encoding: str
+ :param blob_content_language: Optional. Set the blob's content language.
+ If specified, this property is stored with the blob and returned with a
+ read request.
+ :type blob_content_language: str
+ :param blob_content_disposition: Optional. Sets the blob's
+ Content-Disposition header.
+ :type blob_content_disposition: str
+ """
+
+ _attribute_map = {
+ 'blob_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'blob_cache_control'}},
+ 'blob_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_type'}},
+ 'blob_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'blob_content_md5'}},
+ 'blob_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_encoding'}},
+ 'blob_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_language'}},
+ 'blob_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_disposition'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, blob_cache_control: str=None, blob_content_type: str=None, blob_content_md5: bytearray=None, blob_content_encoding: str=None, blob_content_language: str=None, blob_content_disposition: str=None, **kwargs) -> None:
+ super(BlobHTTPHeaders, self).__init__(**kwargs)
+ self.blob_cache_control = blob_cache_control
+ self.blob_content_type = blob_content_type
+ self.blob_content_md5 = blob_content_md5
+ self.blob_content_encoding = blob_content_encoding
+ self.blob_content_language = blob_content_language
+ self.blob_content_disposition = blob_content_disposition
+
+
+class BlobItemInternal(Model):
+ """An Azure Storage blob.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required.
+ :type name: str
+ :param deleted: Required.
+ :type deleted: bool
+ :param snapshot: Required.
+ :type snapshot: str
+ :param version_id:
+ :type version_id: str
+ :param is_current_version:
+ :type is_current_version: bool
+ :param properties: Required.
+ :type properties: ~azure.storage.blob.models.BlobPropertiesInternal
+ :param metadata:
+ :type metadata: ~azure.storage.blob.models.BlobMetadata
+ :param blob_tags:
+ :type blob_tags: ~azure.storage.blob.models.BlobTags
+ :param object_replication_metadata:
+ :type object_replication_metadata: dict[str, str]
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ 'deleted': {'required': True},
+ 'snapshot': {'required': True},
+ 'properties': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+ 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}},
+ 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}},
+ 'version_id': {'key': 'VersionId', 'type': 'str', 'xml': {'name': 'VersionId'}},
+ 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool', 'xml': {'name': 'IsCurrentVersion'}},
+ 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal', 'xml': {'name': 'Properties'}},
+ 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}},
+ 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags', 'xml': {'name': 'BlobTags'}},
+ 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}', 'xml': {'name': 'OrMetadata'}},
+ }
+ _xml_map = {
+ 'name': 'Blob'
+ }
+
+ def __init__(self, *, name: str, deleted: bool, snapshot: str, properties, version_id: str=None, is_current_version: bool=None, metadata=None, blob_tags=None, object_replication_metadata=None, **kwargs) -> None:
+ super(BlobItemInternal, self).__init__(**kwargs)
+ self.name = name
+ self.deleted = deleted
+ self.snapshot = snapshot
+ self.version_id = version_id
+ self.is_current_version = is_current_version
+ self.properties = properties
+ self.metadata = metadata
+ self.blob_tags = blob_tags
+ self.object_replication_metadata = object_replication_metadata
+
+
+class BlobMetadata(Model):
+ """BlobMetadata.
+
+ :param additional_properties: Unmatched properties from the message are
+    deserialized into this collection
+ :type additional_properties: dict[str, str]
+ :param encrypted:
+ :type encrypted: str
+ """
+
+ _attribute_map = {
+ 'additional_properties': {'key': '', 'type': '{str}', 'xml': {'name': 'additional_properties'}},
+ 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'name': 'Encrypted', 'attr': True}},
+ }
+ _xml_map = {
+ 'name': 'Metadata'
+ }
+
+ def __init__(self, *, additional_properties=None, encrypted: str=None, **kwargs) -> None:
+ super(BlobMetadata, self).__init__(**kwargs)
+ self.additional_properties = additional_properties
+ self.encrypted = encrypted
+
+
+class BlobPrefix(Model):
+ """BlobPrefix.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required.
+ :type name: str
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, name: str, **kwargs) -> None:
+ super(BlobPrefix, self).__init__(**kwargs)
+ self.name = name
+
+
+class BlobPropertiesInternal(Model):
+ """Properties of a blob.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param creation_time:
+ :type creation_time: datetime
+ :param last_modified: Required.
+ :type last_modified: datetime
+ :param etag: Required.
+ :type etag: str
+ :param content_length: Size in bytes
+ :type content_length: long
+ :param content_type:
+ :type content_type: str
+ :param content_encoding:
+ :type content_encoding: str
+ :param content_language:
+ :type content_language: str
+ :param content_md5:
+ :type content_md5: bytearray
+ :param content_disposition:
+ :type content_disposition: str
+ :param cache_control:
+ :type cache_control: str
+ :param blob_sequence_number:
+ :type blob_sequence_number: long
+ :param blob_type: Possible values include: 'BlockBlob', 'PageBlob',
+ 'AppendBlob'
+ :type blob_type: str or ~azure.storage.blob.models.BlobType
+ :param lease_status: Possible values include: 'locked', 'unlocked'
+ :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType
+ :param lease_state: Possible values include: 'available', 'leased',
+ 'expired', 'breaking', 'broken'
+ :type lease_state: str or ~azure.storage.blob.models.LeaseStateType
+ :param lease_duration: Possible values include: 'infinite', 'fixed'
+ :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
+ :param copy_id:
+ :type copy_id: str
+ :param copy_status: Possible values include: 'pending', 'success',
+ 'aborted', 'failed'
+ :type copy_status: str or ~azure.storage.blob.models.CopyStatusType
+ :param copy_source:
+ :type copy_source: str
+ :param copy_progress:
+ :type copy_progress: str
+ :param copy_completion_time:
+ :type copy_completion_time: datetime
+ :param copy_status_description:
+ :type copy_status_description: str
+ :param server_encrypted:
+ :type server_encrypted: bool
+ :param incremental_copy:
+ :type incremental_copy: bool
+ :param destination_snapshot:
+ :type destination_snapshot: str
+ :param deleted_time:
+ :type deleted_time: datetime
+ :param remaining_retention_days:
+ :type remaining_retention_days: int
+ :param access_tier: Possible values include: 'P4', 'P6', 'P10', 'P15',
+ 'P20', 'P30', 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
+ :type access_tier: str or ~azure.storage.blob.models.AccessTier
+ :param access_tier_inferred:
+ :type access_tier_inferred: bool
+ :param archive_status: Possible values include:
+ 'rehydrate-pending-to-hot', 'rehydrate-pending-to-cool'
+ :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus
+ :param customer_provided_key_sha256:
+ :type customer_provided_key_sha256: str
+ :param encryption_scope: The name of the encryption scope under which the
+ blob is encrypted.
+ :type encryption_scope: str
+ :param access_tier_change_time:
+ :type access_tier_change_time: datetime
+ :param tag_count:
+ :type tag_count: int
+ :param expires_on:
+ :type expires_on: datetime
+ :param is_sealed:
+ :type is_sealed: bool
+ :param rehydrate_priority: Possible values include: 'High', 'Standard'
+ :type rehydrate_priority: str or
+ ~azure.storage.blob.models.RehydratePriority
+ :param last_accessed_on:
+ :type last_accessed_on: datetime
+ """
+
+ _validation = {
+ 'last_modified': {'required': True},
+ 'etag': {'required': True},
+ }
+
+ _attribute_map = {
+ 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123', 'xml': {'name': 'Creation-Time'}},
+ 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}},
+ 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}},
+ 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}},
+ 'content_type': {'key': 'Content-Type', 'type': 'str', 'xml': {'name': 'Content-Type'}},
+ 'content_encoding': {'key': 'Content-Encoding', 'type': 'str', 'xml': {'name': 'Content-Encoding'}},
+ 'content_language': {'key': 'Content-Language', 'type': 'str', 'xml': {'name': 'Content-Language'}},
+ 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray', 'xml': {'name': 'Content-MD5'}},
+ 'content_disposition': {'key': 'Content-Disposition', 'type': 'str', 'xml': {'name': 'Content-Disposition'}},
+ 'cache_control': {'key': 'Cache-Control', 'type': 'str', 'xml': {'name': 'Cache-Control'}},
+ 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long', 'xml': {'name': 'x-ms-blob-sequence-number'}},
+ 'blob_type': {'key': 'BlobType', 'type': 'BlobType', 'xml': {'name': 'BlobType'}},
+ 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}},
+ 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}},
+ 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}},
+ 'copy_id': {'key': 'CopyId', 'type': 'str', 'xml': {'name': 'CopyId'}},
+ 'copy_status': {'key': 'CopyStatus', 'type': 'CopyStatusType', 'xml': {'name': 'CopyStatus'}},
+ 'copy_source': {'key': 'CopySource', 'type': 'str', 'xml': {'name': 'CopySource'}},
+ 'copy_progress': {'key': 'CopyProgress', 'type': 'str', 'xml': {'name': 'CopyProgress'}},
+ 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123', 'xml': {'name': 'CopyCompletionTime'}},
+ 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str', 'xml': {'name': 'CopyStatusDescription'}},
+ 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool', 'xml': {'name': 'ServerEncrypted'}},
+ 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool', 'xml': {'name': 'IncrementalCopy'}},
+ 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str', 'xml': {'name': 'DestinationSnapshot'}},
+ 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}},
+ 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}},
+ 'access_tier': {'key': 'AccessTier', 'type': 'str', 'xml': {'name': 'AccessTier'}},
+ 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool', 'xml': {'name': 'AccessTierInferred'}},
+ 'archive_status': {'key': 'ArchiveStatus', 'type': 'str', 'xml': {'name': 'ArchiveStatus'}},
+ 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str', 'xml': {'name': 'CustomerProvidedKeySha256'}},
+ 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str', 'xml': {'name': 'EncryptionScope'}},
+ 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 'xml': {'name': 'AccessTierChangeTime'}},
+ 'tag_count': {'key': 'TagCount', 'type': 'int', 'xml': {'name': 'TagCount'}},
+ 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123', 'xml': {'name': 'Expiry-Time'}},
+ 'is_sealed': {'key': 'Sealed', 'type': 'bool', 'xml': {'name': 'Sealed'}},
+ 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str', 'xml': {'name': 'RehydratePriority'}},
+ 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123', 'xml': {'name': 'LastAccessTime'}},
+ }
+ _xml_map = {
+ 'name': 'Properties'
+ }
+
+ def __init__(self, *, last_modified, etag: str, creation_time=None, content_length: int=None, content_type: str=None, content_encoding: str=None, content_language: str=None, content_md5: bytearray=None, content_disposition: str=None, cache_control: str=None, blob_sequence_number: int=None, blob_type=None, lease_status=None, lease_state=None, lease_duration=None, copy_id: str=None, copy_status=None, copy_source: str=None, copy_progress: str=None, copy_completion_time=None, copy_status_description: str=None, server_encrypted: bool=None, incremental_copy: bool=None, destination_snapshot: str=None, deleted_time=None, remaining_retention_days: int=None, access_tier=None, access_tier_inferred: bool=None, archive_status=None, customer_provided_key_sha256: str=None, encryption_scope: str=None, access_tier_change_time=None, tag_count: int=None, expires_on=None, is_sealed: bool=None, rehydrate_priority=None, last_accessed_on=None, **kwargs) -> None:
+ super(BlobPropertiesInternal, self).__init__(**kwargs)
+ self.creation_time = creation_time
+ self.last_modified = last_modified
+ self.etag = etag
+ self.content_length = content_length
+ self.content_type = content_type
+ self.content_encoding = content_encoding
+ self.content_language = content_language
+ self.content_md5 = content_md5
+ self.content_disposition = content_disposition
+ self.cache_control = cache_control
+ self.blob_sequence_number = blob_sequence_number
+ self.blob_type = blob_type
+ self.lease_status = lease_status
+ self.lease_state = lease_state
+ self.lease_duration = lease_duration
+ self.copy_id = copy_id
+ self.copy_status = copy_status
+ self.copy_source = copy_source
+ self.copy_progress = copy_progress
+ self.copy_completion_time = copy_completion_time
+ self.copy_status_description = copy_status_description
+ self.server_encrypted = server_encrypted
+ self.incremental_copy = incremental_copy
+ self.destination_snapshot = destination_snapshot
+ self.deleted_time = deleted_time
+ self.remaining_retention_days = remaining_retention_days
+ self.access_tier = access_tier
+ self.access_tier_inferred = access_tier_inferred
+ self.archive_status = archive_status
+ self.customer_provided_key_sha256 = customer_provided_key_sha256
+ self.encryption_scope = encryption_scope
+ self.access_tier_change_time = access_tier_change_time
+ self.tag_count = tag_count
+ self.expires_on = expires_on
+ self.is_sealed = is_sealed
+ self.rehydrate_priority = rehydrate_priority
+ self.last_accessed_on = last_accessed_on
+
+
+class BlobTag(Model):
+ """BlobTag.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param key: Required.
+ :type key: str
+ :param value: Required.
+ :type value: str
+ """
+
+ _validation = {
+ 'key': {'required': True},
+ 'value': {'required': True},
+ }
+
+ _attribute_map = {
+ 'key': {'key': 'Key', 'type': 'str', 'xml': {'name': 'Key'}},
+ 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}},
+ }
+ _xml_map = {
+ 'name': 'Tag'
+ }
+
+ def __init__(self, *, key: str, value: str, **kwargs) -> None:
+ super(BlobTag, self).__init__(**kwargs)
+ self.key = key
+ self.value = value
+
+
+class BlobTags(Model):
+ """Blob tags.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param blob_tag_set: Required.
+ :type blob_tag_set: list[~azure.storage.blob.models.BlobTag]
+ """
+
+ _validation = {
+ 'blob_tag_set': {'required': True},
+ }
+
+ _attribute_map = {
+ 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'itemsName': 'TagSet', 'wrapped': True}},
+ }
+ _xml_map = {
+ 'name': 'Tags'
+ }
+
+ def __init__(self, *, blob_tag_set, **kwargs) -> None:
+ super(BlobTags, self).__init__(**kwargs)
+ self.blob_tag_set = blob_tag_set
+
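+# Editorial sketch (not AutoRest output): BlobTags wraps a list of BlobTag
+# key/value pairs, both defined above. Placeholder values only.
+_example_blob_tags = BlobTags(blob_tag_set=[
+    BlobTag(key='project', value='demo'),
+    BlobTag(key='env', value='test'),
+])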
+
+class Block(Model):
+ """Represents a single block in a block blob. It describes the block's ID and
+ size.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required. The base64 encoded block ID.
+ :type name: str
+ :param size: Required. The block size in bytes.
+ :type size: int
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ 'size': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+ 'size': {'key': 'Size', 'type': 'int', 'xml': {'name': 'Size'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, name: str, size: int, **kwargs) -> None:
+ super(Block, self).__init__(**kwargs)
+ self.name = name
+ self.size = size
+
+
+class BlockList(Model):
+ """BlockList.
+
+ :param committed_blocks:
+ :type committed_blocks: list[~azure.storage.blob.models.Block]
+ :param uncommitted_blocks:
+ :type uncommitted_blocks: list[~azure.storage.blob.models.Block]
+ """
+
+ _attribute_map = {
+ 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'name': 'CommittedBlocks', 'itemsName': 'Block', 'wrapped': True}},
+ 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'name': 'UncommittedBlocks', 'itemsName': 'Block', 'wrapped': True}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, committed_blocks=None, uncommitted_blocks=None, **kwargs) -> None:
+ super(BlockList, self).__init__(**kwargs)
+ self.committed_blocks = committed_blocks
+ self.uncommitted_blocks = uncommitted_blocks
+
+
+class BlockLookupList(Model):
+ """BlockLookupList.
+
+ :param committed:
+ :type committed: list[str]
+ :param uncommitted:
+ :type uncommitted: list[str]
+ :param latest:
+ :type latest: list[str]
+ """
+
+ _attribute_map = {
+ 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'name': 'Committed', 'itemsName': 'Committed'}},
+ 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'name': 'Uncommitted', 'itemsName': 'Uncommitted'}},
+ 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'name': 'Latest', 'itemsName': 'Latest'}},
+ }
+ _xml_map = {
+ 'name': 'BlockList'
+ }
+
+ def __init__(self, *, committed=None, uncommitted=None, latest=None, **kwargs) -> None:
+ super(BlockLookupList, self).__init__(**kwargs)
+ self.committed = committed
+ self.uncommitted = uncommitted
+ self.latest = latest
+
+
+class ClearRange(Model):
+ """ClearRange.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param start: Required.
+ :type start: long
+ :param end: Required.
+ :type end: long
+ """
+
+ _validation = {
+ 'start': {'required': True},
+ 'end': {'required': True},
+ }
+
+ _attribute_map = {
+ 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
+ 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
+ }
+ _xml_map = {
+ 'name': 'ClearRange'
+ }
+
+ def __init__(self, *, start: int, end: int, **kwargs) -> None:
+ super(ClearRange, self).__init__(**kwargs)
+ self.start = start
+ self.end = end
+
+
+class ContainerCpkScopeInfo(Model):
+ """Additional parameters for create operation.
+
+ :param default_encryption_scope: Optional. Version 2019-07-07 and later.
+ Specifies the default encryption scope to set on the container and use for
+ all future writes.
+ :type default_encryption_scope: str
+ :param prevent_encryption_scope_override: Optional. Version 2019-07-07
+ and newer. If true, prevents any request from specifying a different
+ encryption scope than the scope set on the container.
+ :type prevent_encryption_scope_override: bool
+ """
+
+ _attribute_map = {
+ 'default_encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'default_encryption_scope'}},
+ 'prevent_encryption_scope_override': {'key': '', 'type': 'bool', 'xml': {'name': 'prevent_encryption_scope_override'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, default_encryption_scope: str=None, prevent_encryption_scope_override: bool=None, **kwargs) -> None:
+ super(ContainerCpkScopeInfo, self).__init__(**kwargs)
+ self.default_encryption_scope = default_encryption_scope
+ self.prevent_encryption_scope_override = prevent_encryption_scope_override
+
+
+class ContainerItem(Model):
+ """An Azure Storage container.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required.
+ :type name: str
+ :param deleted:
+ :type deleted: bool
+ :param version:
+ :type version: str
+ :param properties: Required.
+ :type properties: ~azure.storage.blob.models.ContainerProperties
+ :param metadata:
+ :type metadata: dict[str, str]
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ 'properties': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+ 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}},
+ 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
+ 'properties': {'key': 'Properties', 'type': 'ContainerProperties', 'xml': {'name': 'Properties'}},
+ 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}},
+ }
+ _xml_map = {
+ 'name': 'Container'
+ }
+
+ def __init__(self, *, name: str, properties, deleted: bool=None, version: str=None, metadata=None, **kwargs) -> None:
+ super(ContainerItem, self).__init__(**kwargs)
+ self.name = name
+ self.deleted = deleted
+ self.version = version
+ self.properties = properties
+ self.metadata = metadata
+
+
+class ContainerProperties(Model):
+ """Properties of a container.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param last_modified: Required.
+ :type last_modified: datetime
+ :param etag: Required.
+ :type etag: str
+ :param lease_status: Possible values include: 'locked', 'unlocked'
+ :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType
+ :param lease_state: Possible values include: 'available', 'leased',
+ 'expired', 'breaking', 'broken'
+ :type lease_state: str or ~azure.storage.blob.models.LeaseStateType
+ :param lease_duration: Possible values include: 'infinite', 'fixed'
+ :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
+ :param public_access: Possible values include: 'container', 'blob'
+ :type public_access: str or ~azure.storage.blob.models.PublicAccessType
+ :param has_immutability_policy:
+ :type has_immutability_policy: bool
+ :param has_legal_hold:
+ :type has_legal_hold: bool
+ :param default_encryption_scope:
+ :type default_encryption_scope: str
+ :param prevent_encryption_scope_override:
+ :type prevent_encryption_scope_override: bool
+ :param deleted_time:
+ :type deleted_time: datetime
+ :param remaining_retention_days:
+ :type remaining_retention_days: int
+ """
+
+ _validation = {
+ 'last_modified': {'required': True},
+ 'etag': {'required': True},
+ }
+
+ _attribute_map = {
+ 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}},
+ 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}},
+ 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}},
+ 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}},
+ 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}},
+ 'public_access': {'key': 'PublicAccess', 'type': 'str', 'xml': {'name': 'PublicAccess'}},
+ 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool', 'xml': {'name': 'HasImmutabilityPolicy'}},
+ 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool', 'xml': {'name': 'HasLegalHold'}},
+ 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str', 'xml': {'name': 'DefaultEncryptionScope'}},
+ 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool', 'xml': {'name': 'DenyEncryptionScopeOverride'}},
+ 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}},
+ 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, last_modified, etag: str, lease_status=None, lease_state=None, lease_duration=None, public_access=None, has_immutability_policy: bool=None, has_legal_hold: bool=None, default_encryption_scope: str=None, prevent_encryption_scope_override: bool=None, deleted_time=None, remaining_retention_days: int=None, **kwargs) -> None:
+ super(ContainerProperties, self).__init__(**kwargs)
+ self.last_modified = last_modified
+ self.etag = etag
+ self.lease_status = lease_status
+ self.lease_state = lease_state
+ self.lease_duration = lease_duration
+ self.public_access = public_access
+ self.has_immutability_policy = has_immutability_policy
+ self.has_legal_hold = has_legal_hold
+ self.default_encryption_scope = default_encryption_scope
+ self.prevent_encryption_scope_override = prevent_encryption_scope_override
+ self.deleted_time = deleted_time
+ self.remaining_retention_days = remaining_retention_days
+
+
+class CorsRule(Model):
+ """CORS is an HTTP feature that enables a web application running under one
+ domain to access resources in another domain. Web browsers implement a
+ security restriction known as same-origin policy that prevents a web page
+ from calling APIs in a different domain; CORS provides a secure way to
+ allow one domain (the origin domain) to call APIs in another domain.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param allowed_origins: Required. The origin domains that are permitted to
+ make a request against the storage service via CORS. The origin domain is
+ the domain from which the request originates. Note that the origin must be
+    an exact case-sensitive match with the origin that the user agent sends to
+ the service. You can also use the wildcard character '*' to allow all
+ origin domains to make requests via CORS.
+ :type allowed_origins: str
+ :param allowed_methods: Required. The methods (HTTP request verbs) that
+ the origin domain may use for a CORS request. (comma separated)
+ :type allowed_methods: str
+ :param allowed_headers: Required. the request headers that the origin
+ domain may specify on the CORS request.
+ :type allowed_headers: str
+ :param exposed_headers: Required. The response headers that may be sent in
+ the response to the CORS request and exposed by the browser to the request
+ issuer
+ :type exposed_headers: str
+    :param max_age_in_seconds: Required. The maximum amount of time that a
+ browser should cache the preflight OPTIONS request.
+ :type max_age_in_seconds: int
+ """
+
+ _validation = {
+ 'allowed_origins': {'required': True},
+ 'allowed_methods': {'required': True},
+ 'allowed_headers': {'required': True},
+ 'exposed_headers': {'required': True},
+ 'max_age_in_seconds': {'required': True, 'minimum': 0},
+ }
+
+ _attribute_map = {
+ 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}},
+ 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}},
+ 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}},
+ 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}},
+ 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_headers: str, exposed_headers: str, max_age_in_seconds: int, **kwargs) -> None:
+ super(CorsRule, self).__init__(**kwargs)
+ self.allowed_origins = allowed_origins
+ self.allowed_methods = allowed_methods
+ self.allowed_headers = allowed_headers
+ self.exposed_headers = exposed_headers
+ self.max_age_in_seconds = max_age_in_seconds
+
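+# Editorial sketch (not AutoRest output): all five CorsRule parameters are
+# required, per the validation map above. Placeholder values only.
+_example_cors_rule = CorsRule(
+    allowed_origins='https://contoso.example',  # exact origin, or '*' for any
+    allowed_methods='GET,PUT',                  # comma-separated HTTP verbs
+    allowed_headers='x-ms-meta-*',
+    exposed_headers='x-ms-request-id',
+    max_age_in_seconds=3600,                    # preflight cache lifetime in seconds
+)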
+
+class CpkInfo(Model):
+ """Additional parameters for a set of operations.
+
+ :param encryption_key: Optional. Specifies the encryption key to use to
+ encrypt the data provided in the request. If not specified, encryption is
+ performed with the root account encryption key. For more information, see
+ Encryption at Rest for Azure Storage Services.
+ :type encryption_key: str
+ :param encryption_key_sha256: The SHA-256 hash of the provided encryption
+ key. Must be provided if the x-ms-encryption-key header is provided.
+ :type encryption_key_sha256: str
+ :param encryption_algorithm: The algorithm used to produce the encryption
+ key hash. Currently, the only accepted value is "AES256". Must be provided
+ if the x-ms-encryption-key header is provided. Possible values include:
+ 'AES256'
+ :type encryption_algorithm: str or
+ ~azure.storage.blob.models.EncryptionAlgorithmType
+ """
+
+ _attribute_map = {
+ 'encryption_key': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key'}},
+ 'encryption_key_sha256': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key_sha256'}},
+ 'encryption_algorithm': {'key': '', 'type': 'EncryptionAlgorithmType', 'xml': {'name': 'encryption_algorithm'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, encryption_key: str=None, encryption_key_sha256: str=None, encryption_algorithm=None, **kwargs) -> None:
+ super(CpkInfo, self).__init__(**kwargs)
+ self.encryption_key = encryption_key
+ self.encryption_key_sha256 = encryption_key_sha256
+ self.encryption_algorithm = encryption_algorithm
+
+
+class CpkScopeInfo(Model):
+ """Additional parameters for a set of operations.
+
+ :param encryption_scope: Optional. Version 2019-07-07 and later.
+ Specifies the name of the encryption scope to use to encrypt the data
+ provided in the request. If not specified, encryption is performed with
+ the default account encryption scope. For more information, see
+ Encryption at Rest for Azure Storage Services.
+ :type encryption_scope: str
+ """
+
+ _attribute_map = {
+ 'encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_scope'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, encryption_scope: str=None, **kwargs) -> None:
+ super(CpkScopeInfo, self).__init__(**kwargs)
+ self.encryption_scope = encryption_scope
+
+
+class DataLakeStorageError(Model):
+ """DataLakeStorageError.
+
+ :param data_lake_storage_error_details: The service error response object.
+ :type data_lake_storage_error_details:
+ ~azure.storage.blob.models.DataLakeStorageErrorError
+ """
+
+ _attribute_map = {
+ 'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorError', 'xml': {'name': 'error'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, data_lake_storage_error_details=None, **kwargs) -> None:
+ super(DataLakeStorageError, self).__init__(**kwargs)
+ self.data_lake_storage_error_details = data_lake_storage_error_details
+
+
+class DataLakeStorageErrorException(HttpResponseError):
+    """Server responded with exception of type: 'DataLakeStorageError'.
+
+ :param deserialize: A deserializer
+ :param response: Server response to be deserialized.
+ """
+
+ def __init__(self, response, deserialize, *args):
+
+ model_name = 'DataLakeStorageError'
+ self.error = deserialize(model_name, response)
+ if self.error is None:
+ self.error = deserialize.dependencies[model_name]()
+ super(DataLakeStorageErrorException, self).__init__(response=response)
+
+
+class DataLakeStorageErrorError(Model):
+ """The service error response object.
+
+ :param code: The service error code.
+ :type code: str
+ :param message: The service error message.
+ :type message: str
+ """
+
+ _attribute_map = {
+ 'code': {'key': 'Code', 'type': 'str', 'xml': {'name': 'Code'}},
+ 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, code: str=None, message: str=None, **kwargs) -> None:
+ super(DataLakeStorageErrorError, self).__init__(**kwargs)
+ self.code = code
+ self.message = message
+
+
+class DelimitedTextConfiguration(Model):
+ """delimited text configuration.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param column_separator: Required. column separator
+ :type column_separator: str
+ :param field_quote: Required. field quote
+ :type field_quote: str
+ :param record_separator: Required. record separator
+ :type record_separator: str
+ :param escape_char: Required. escape char
+ :type escape_char: str
+ :param headers_present: Required. has headers
+ :type headers_present: bool
+ """
+
+ _validation = {
+ 'column_separator': {'required': True},
+ 'field_quote': {'required': True},
+ 'record_separator': {'required': True},
+ 'escape_char': {'required': True},
+ 'headers_present': {'required': True},
+ }
+
+ _attribute_map = {
+ 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}},
+ 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}},
+ 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}},
+ 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}},
+ 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}},
+ }
+ _xml_map = {
+ 'name': 'DelimitedTextConfiguration'
+ }
+
+ def __init__(self, *, column_separator: str, field_quote: str, record_separator: str, escape_char: str, headers_present: bool, **kwargs) -> None:
+ super(DelimitedTextConfiguration, self).__init__(**kwargs)
+ self.column_separator = column_separator
+ self.field_quote = field_quote
+ self.record_separator = record_separator
+ self.escape_char = escape_char
+ self.headers_present = headers_present
+
+
+class DirectoryHttpHeaders(Model):
+ """Additional parameters for a set of operations, such as: Directory_create,
+ Directory_rename, Blob_rename.
+
+ :param cache_control: Cache control for given resource
+ :type cache_control: str
+ :param content_type: Content type for given resource
+ :type content_type: str
+ :param content_encoding: Content encoding for given resource
+ :type content_encoding: str
+ :param content_language: Content language for given resource
+ :type content_language: str
+ :param content_disposition: Content disposition for given resource
+ :type content_disposition: str
+ """
+
+ _attribute_map = {
+ 'cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'cache_control'}},
+ 'content_type': {'key': '', 'type': 'str', 'xml': {'name': 'content_type'}},
+ 'content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'content_encoding'}},
+ 'content_language': {'key': '', 'type': 'str', 'xml': {'name': 'content_language'}},
+ 'content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'content_disposition'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, cache_control: str=None, content_type: str=None, content_encoding: str=None, content_language: str=None, content_disposition: str=None, **kwargs) -> None:
+ super(DirectoryHttpHeaders, self).__init__(**kwargs)
+ self.cache_control = cache_control
+ self.content_type = content_type
+ self.content_encoding = content_encoding
+ self.content_language = content_language
+ self.content_disposition = content_disposition
+
+
+class FilterBlobItem(Model):
+ """Blob info from a Filter Blobs API call.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required.
+ :type name: str
+ :param container_name: Required.
+ :type container_name: str
+ :param tag_value: Required.
+ :type tag_value: str
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ 'container_name': {'required': True},
+ 'tag_value': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
+ 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName'}},
+ 'tag_value': {'key': 'TagValue', 'type': 'str', 'xml': {'name': 'TagValue'}},
+ }
+ _xml_map = {
+ 'name': 'Blob'
+ }
+
+ def __init__(self, *, name: str, container_name: str, tag_value: str, **kwargs) -> None:
+ super(FilterBlobItem, self).__init__(**kwargs)
+ self.name = name
+ self.container_name = container_name
+ self.tag_value = tag_value
+
+
+class FilterBlobSegment(Model):
+ """The result of a Filter Blobs API call.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param service_endpoint: Required.
+ :type service_endpoint: str
+ :param where: Required.
+ :type where: str
+ :param blobs: Required.
+ :type blobs: list[~azure.storage.blob.models.FilterBlobItem]
+ :param next_marker:
+ :type next_marker: str
+ """
+
+ _validation = {
+ 'service_endpoint': {'required': True},
+ 'where': {'required': True},
+ 'blobs': {'required': True},
+ }
+
+ _attribute_map = {
+ 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
+ 'where': {'key': 'Where', 'type': 'str', 'xml': {'name': 'Where'}},
+ 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'itemsName': 'Blobs', 'wrapped': True}},
+ 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
+ }
+ _xml_map = {
+ 'name': 'EnumerationResults'
+ }
+
+ def __init__(self, *, service_endpoint: str, where: str, blobs, next_marker: str=None, **kwargs) -> None:
+ super(FilterBlobSegment, self).__init__(**kwargs)
+ self.service_endpoint = service_endpoint
+ self.where = where
+ self.blobs = blobs
+ self.next_marker = next_marker
+
+
+class GeoReplication(Model):
+ """Geo-Replication information for the Secondary Storage Service.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param status: Required. The status of the secondary location. Possible
+ values include: 'live', 'bootstrap', 'unavailable'
+ :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType
+ :param last_sync_time: Required. A GMT date/time value, to the second. All
+ primary writes preceding this value are guaranteed to be available for
+ read operations at the secondary. Primary writes after this point in time
+ may or may not be available for reads.
+ :type last_sync_time: datetime
+ """
+
+ _validation = {
+ 'status': {'required': True},
+ 'last_sync_time': {'required': True},
+ }
+
+ _attribute_map = {
+ 'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}},
+ 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, status, last_sync_time, **kwargs) -> None:
+ super(GeoReplication, self).__init__(**kwargs)
+ self.status = status
+ self.last_sync_time = last_sync_time
+
+
+class JsonTextConfiguration(Model):
+ """json text configuration.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param record_separator: Required. record separator
+ :type record_separator: str
+ """
+
+ _validation = {
+ 'record_separator': {'required': True},
+ }
+
+ _attribute_map = {
+ 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}},
+ }
+ _xml_map = {
+ 'name': 'JsonTextConfiguration'
+ }
+
+ def __init__(self, *, record_separator: str, **kwargs) -> None:
+ super(JsonTextConfiguration, self).__init__(**kwargs)
+ self.record_separator = record_separator
+
+
+class KeyInfo(Model):
+ """Key information.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param start: Required. The date-time the key is active in ISO 8601 UTC
+ time
+ :type start: str
+ :param expiry: Required. The date-time the key expires in ISO 8601 UTC
+ time
+ :type expiry: str
+ """
+
+ _validation = {
+ 'start': {'required': True},
+ 'expiry': {'required': True},
+ }
+
+ _attribute_map = {
+ 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
+ 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, start: str, expiry: str, **kwargs) -> None:
+ super(KeyInfo, self).__init__(**kwargs)
+ self.start = start
+ self.expiry = expiry
+
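+# Editorial sketch (not AutoRest output): KeyInfo carries a validity window as
+# ISO 8601 UTC strings, per the docstring above; typically it forms the body of
+# a request for a user delegation key. Placeholder values only.
+_example_key_info = KeyInfo(
+    start='2021-05-20T00:00:00Z',   # when the key becomes active
+    expiry='2021-05-27T00:00:00Z',  # when the key expires
+)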
+
+class LeaseAccessConditions(Model):
+ """Additional parameters for a set of operations.
+
+ :param lease_id: If specified, the operation only succeeds if the
+ resource's lease is active and matches this ID.
+ :type lease_id: str
+ """
+
+ _attribute_map = {
+ 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, lease_id: str=None, **kwargs) -> None:
+ super(LeaseAccessConditions, self).__init__(**kwargs)
+ self.lease_id = lease_id
+
+
+class ListBlobsFlatSegmentResponse(Model):
+ """An enumeration of blobs.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param service_endpoint: Required.
+ :type service_endpoint: str
+ :param container_name: Required.
+ :type container_name: str
+ :param prefix:
+ :type prefix: str
+ :param marker:
+ :type marker: str
+ :param max_results:
+ :type max_results: int
+ :param segment: Required.
+ :type segment: ~azure.storage.blob.models.BlobFlatListSegment
+ :param next_marker:
+ :type next_marker: str
+ """
+
+ _validation = {
+ 'service_endpoint': {'required': True},
+ 'container_name': {'required': True},
+ 'segment': {'required': True},
+ }
+
+ _attribute_map = {
+ 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
+ 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}},
+ 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
+ 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
+ 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
+ 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment', 'xml': {'name': 'Segment'}},
+ 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
+ }
+ _xml_map = {
+ 'name': 'EnumerationResults'
+ }
+
+ def __init__(self, *, service_endpoint: str, container_name: str, segment, prefix: str=None, marker: str=None, max_results: int=None, next_marker: str=None, **kwargs) -> None:
+ super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs)
+ self.service_endpoint = service_endpoint
+ self.container_name = container_name
+ self.prefix = prefix
+ self.marker = marker
+ self.max_results = max_results
+ self.segment = segment
+ self.next_marker = next_marker
+
+
+class ListBlobsHierarchySegmentResponse(Model):
+ """An enumeration of blobs.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param service_endpoint: Required.
+ :type service_endpoint: str
+ :param container_name: Required.
+ :type container_name: str
+ :param prefix:
+ :type prefix: str
+ :param marker:
+ :type marker: str
+ :param max_results:
+ :type max_results: int
+ :param delimiter:
+ :type delimiter: str
+ :param segment: Required.
+ :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment
+ :param next_marker:
+ :type next_marker: str
+ """
+
+ _validation = {
+ 'service_endpoint': {'required': True},
+ 'container_name': {'required': True},
+ 'segment': {'required': True},
+ }
+
+ _attribute_map = {
+ 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
+ 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}},
+ 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
+ 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
+ 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
+ 'delimiter': {'key': 'Delimiter', 'type': 'str', 'xml': {'name': 'Delimiter'}},
+ 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment', 'xml': {'name': 'Segment'}},
+ 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
+ }
+ _xml_map = {
+ 'name': 'EnumerationResults'
+ }
+
+ def __init__(self, *, service_endpoint: str, container_name: str, segment, prefix: str=None, marker: str=None, max_results: int=None, delimiter: str=None, next_marker: str=None, **kwargs) -> None:
+ super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs)
+ self.service_endpoint = service_endpoint
+ self.container_name = container_name
+ self.prefix = prefix
+ self.marker = marker
+ self.max_results = max_results
+ self.delimiter = delimiter
+ self.segment = segment
+ self.next_marker = next_marker
+
+
+class ListContainersSegmentResponse(Model):
+ """An enumeration of containers.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param service_endpoint: Required.
+ :type service_endpoint: str
+ :param prefix:
+ :type prefix: str
+ :param marker:
+ :type marker: str
+ :param max_results:
+ :type max_results: int
+ :param container_items: Required.
+ :type container_items: list[~azure.storage.blob.models.ContainerItem]
+ :param next_marker:
+ :type next_marker: str
+ """
+
+ _validation = {
+ 'service_endpoint': {'required': True},
+ 'container_items': {'required': True},
+ }
+
+ _attribute_map = {
+ 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
+ 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
+ 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
+ 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
+ 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'itemsName': 'Containers', 'wrapped': True}},
+ 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
+ }
+ _xml_map = {
+ 'name': 'EnumerationResults'
+ }
+
+ def __init__(self, *, service_endpoint: str, container_items, prefix: str=None, marker: str=None, max_results: int=None, next_marker: str=None, **kwargs) -> None:
+ super(ListContainersSegmentResponse, self).__init__(**kwargs)
+ self.service_endpoint = service_endpoint
+ self.prefix = prefix
+ self.marker = marker
+ self.max_results = max_results
+ self.container_items = container_items
+ self.next_marker = next_marker
+
+
+class Logging(Model):
+ """Azure Analytics Logging settings.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param version: Required. The version of Storage Analytics to configure.
+ :type version: str
+ :param delete: Required. Indicates whether all delete requests should be
+ logged.
+ :type delete: bool
+ :param read: Required. Indicates whether all read requests should be
+ logged.
+ :type read: bool
+ :param write: Required. Indicates whether all write requests should be
+ logged.
+ :type write: bool
+ :param retention_policy: Required.
+ :type retention_policy: ~azure.storage.blob.models.RetentionPolicy
+ """
+
+ _validation = {
+ 'version': {'required': True},
+ 'delete': {'required': True},
+ 'read': {'required': True},
+ 'write': {'required': True},
+ 'retention_policy': {'required': True},
+ }
+
+ _attribute_map = {
+ 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
+ 'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}},
+ 'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}},
+ 'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}},
+ 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, version: str, delete: bool, read: bool, write: bool, retention_policy, **kwargs) -> None:
+ super(Logging, self).__init__(**kwargs)
+ self.version = version
+ self.delete = delete
+ self.read = read
+ self.write = write
+ self.retention_policy = retention_policy
+
+
+class Metrics(Model):
+ """a summary of request statistics grouped by API in hour or minute aggregates
+ for blobs.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param version: The version of Storage Analytics to configure.
+ :type version: str
+ :param enabled: Required. Indicates whether metrics are enabled for the
+ Blob service.
+ :type enabled: bool
+ :param include_apis: Indicates whether metrics should generate summary
+ statistics for called API operations.
+ :type include_apis: bool
+ :param retention_policy:
+ :type retention_policy: ~azure.storage.blob.models.RetentionPolicy
+ """
+
+ _validation = {
+ 'enabled': {'required': True},
+ }
+
+ _attribute_map = {
+ 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
+ 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
+ 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}},
+ 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, enabled: bool, version: str=None, include_apis: bool=None, retention_policy=None, **kwargs) -> None:
+ super(Metrics, self).__init__(**kwargs)
+ self.version = version
+ self.enabled = enabled
+ self.include_apis = include_apis
+ self.retention_policy = retention_policy
+
+
+class ModifiedAccessConditions(Model):
+ """Additional parameters for a set of operations.
+
+ :param if_modified_since: Specify this header value to operate only on a
+ blob if it has been modified since the specified date/time.
+ :type if_modified_since: datetime
+ :param if_unmodified_since: Specify this header value to operate only on a
+ blob if it has not been modified since the specified date/time.
+ :type if_unmodified_since: datetime
+ :param if_match: Specify an ETag value to operate only on blobs with a
+ matching value.
+ :type if_match: str
+ :param if_none_match: Specify an ETag value to operate only on blobs
+ without a matching value.
+ :type if_none_match: str
+ :param if_tags: Specify a SQL where clause on blob tags to operate only on
+ blobs with a matching value.
+ :type if_tags: str
+ """
+
+ _attribute_map = {
+ 'if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_modified_since'}},
+ 'if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_unmodified_since'}},
+ 'if_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_match'}},
+ 'if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_none_match'}},
+ 'if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'if_tags'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, if_modified_since=None, if_unmodified_since=None, if_match: str=None, if_none_match: str=None, if_tags: str=None, **kwargs) -> None:
+ super(ModifiedAccessConditions, self).__init__(**kwargs)
+ self.if_modified_since = if_modified_since
+ self.if_unmodified_since = if_unmodified_since
+ self.if_match = if_match
+ self.if_none_match = if_none_match
+ self.if_tags = if_tags
+
+
+class PageList(Model):
+ """the list of pages.
+
+ :param page_range:
+ :type page_range: list[~azure.storage.blob.models.PageRange]
+ :param clear_range:
+ :type clear_range: list[~azure.storage.blob.models.ClearRange]
+ """
+
+ _attribute_map = {
+ 'page_range': {'key': 'PageRange', 'type': '[PageRange]', 'xml': {'name': 'PageRange', 'itemsName': 'PageRange'}},
+ 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]', 'xml': {'name': 'ClearRange', 'itemsName': 'ClearRange'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, page_range=None, clear_range=None, **kwargs) -> None:
+ super(PageList, self).__init__(**kwargs)
+ self.page_range = page_range
+ self.clear_range = clear_range
+
+
+class PageRange(Model):
+ """PageRange.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param start: Required.
+ :type start: long
+ :param end: Required.
+ :type end: long
+ """
+
+ _validation = {
+ 'start': {'required': True},
+ 'end': {'required': True},
+ }
+
+ _attribute_map = {
+ 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
+ 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
+ }
+ _xml_map = {
+ 'name': 'PageRange'
+ }
+
+ def __init__(self, *, start: int, end: int, **kwargs) -> None:
+ super(PageRange, self).__init__(**kwargs)
+ self.start = start
+ self.end = end
+
+
+class QueryFormat(Model):
+ """QueryFormat.
+
+ :param type: Possible values include: 'delimited', 'json', 'arrow'
+ :type type: str or ~azure.storage.blob.models.QueryFormatType
+ :param delimited_text_configuration:
+ :type delimited_text_configuration:
+ ~azure.storage.blob.models.DelimitedTextConfiguration
+ :param json_text_configuration:
+ :type json_text_configuration:
+ ~azure.storage.blob.models.JsonTextConfiguration
+ :param arrow_configuration:
+ :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration
+ """
+
+ _attribute_map = {
+ 'type': {'key': 'Type', 'type': 'QueryFormatType', 'xml': {'name': 'Type'}},
+ 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration', 'xml': {'name': 'DelimitedTextConfiguration'}},
+ 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration', 'xml': {'name': 'JsonTextConfiguration'}},
+ 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration', 'xml': {'name': 'ArrowConfiguration'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, type=None, delimited_text_configuration=None, json_text_configuration=None, arrow_configuration=None, **kwargs) -> None:
+ super(QueryFormat, self).__init__(**kwargs)
+ self.type = type
+ self.delimited_text_configuration = delimited_text_configuration
+ self.json_text_configuration = json_text_configuration
+ self.arrow_configuration = arrow_configuration
+
+
+class QueryRequest(Model):
+ """the quick query body.
+
+ Variables are only populated by the server, and will be ignored when
+ sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+    :ivar query_type: Required. the query type. Default value: "SQL".
+ :vartype query_type: str
+ :param expression: Required. a query statement
+ :type expression: str
+ :param input_serialization:
+ :type input_serialization: ~azure.storage.blob.models.QuerySerialization
+ :param output_serialization:
+ :type output_serialization: ~azure.storage.blob.models.QuerySerialization
+ """
+
+ _validation = {
+ 'query_type': {'required': True, 'constant': True},
+ 'expression': {'required': True},
+ }
+
+ _attribute_map = {
+ 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}},
+ 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}},
+ 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'InputSerialization'}},
+ 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'OutputSerialization'}},
+ }
+ _xml_map = {
+ 'name': 'QueryRequest'
+ }
+
+ query_type = "SQL"
+
+ def __init__(self, *, expression: str, input_serialization=None, output_serialization=None, **kwargs) -> None:
+ super(QueryRequest, self).__init__(**kwargs)
+ self.expression = expression
+ self.input_serialization = input_serialization
+ self.output_serialization = output_serialization
+
+
+class QuerySerialization(Model):
+ """QuerySerialization.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param format: Required.
+ :type format: ~azure.storage.blob.models.QueryFormat
+ """
+
+ _validation = {
+ 'format': {'required': True},
+ }
+
+ _attribute_map = {
+ 'format': {'key': 'Format', 'type': 'QueryFormat', 'xml': {'name': 'Format'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, format, **kwargs) -> None:
+ super(QuerySerialization, self).__init__(**kwargs)
+ self.format = format
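+
+# A minimal illustrative sketch (not part of the generated models): the three
+# classes above compose into the quick-query request body. Assuming delimited
+# (CSV) input and JSON output, a request could be built roughly like this:
+#
+#     request = QueryRequest(
+#         expression="SELECT * FROM BlobStorage",
+#         input_serialization=QuerySerialization(format=QueryFormat(type='delimited')),
+#         output_serialization=QuerySerialization(format=QueryFormat(type='json')))
+#
+# The optional DelimitedTextConfiguration/JsonTextConfiguration objects can be
+# attached to each QueryFormat for finer control over the text format.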
+
+
+class RetentionPolicy(Model):
+ """the retention policy which determines how long the associated data should
+ persist.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param enabled: Required. Indicates whether a retention policy is enabled
+ for the storage service
+ :type enabled: bool
+ :param days: Indicates the number of days that metrics or logging or
+ soft-deleted data should be retained. All data older than this value will
+ be deleted
+ :type days: int
+ """
+
+ _validation = {
+ 'enabled': {'required': True},
+ 'days': {'minimum': 1},
+ }
+
+ _attribute_map = {
+ 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
+ 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, enabled: bool, days: int=None, **kwargs) -> None:
+ super(RetentionPolicy, self).__init__(**kwargs)
+ self.enabled = enabled
+ self.days = days
+
+
+class SequenceNumberAccessConditions(Model):
+ """Additional parameters for a set of operations, such as:
+ PageBlob_upload_pages, PageBlob_clear_pages,
+ PageBlob_upload_pages_from_url.
+
+ :param if_sequence_number_less_than_or_equal_to: Specify this header value
+ to operate only on a blob if it has a sequence number less than or equal
+    to the specified value.
+ :type if_sequence_number_less_than_or_equal_to: long
+ :param if_sequence_number_less_than: Specify this header value to operate
+    only on a blob if it has a sequence number less than the specified value.
+ :type if_sequence_number_less_than: long
+ :param if_sequence_number_equal_to: Specify this header value to operate
+ only on a blob if it has the specified sequence number.
+ :type if_sequence_number_equal_to: long
+ """
+
+ _attribute_map = {
+ 'if_sequence_number_less_than_or_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than_or_equal_to'}},
+ 'if_sequence_number_less_than': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than'}},
+ 'if_sequence_number_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_equal_to'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, if_sequence_number_less_than_or_equal_to: int=None, if_sequence_number_less_than: int=None, if_sequence_number_equal_to: int=None, **kwargs) -> None:
+ super(SequenceNumberAccessConditions, self).__init__(**kwargs)
+ self.if_sequence_number_less_than_or_equal_to = if_sequence_number_less_than_or_equal_to
+ self.if_sequence_number_less_than = if_sequence_number_less_than
+ self.if_sequence_number_equal_to = if_sequence_number_equal_to
+
+
+class SignedIdentifier(Model):
+ """signed identifier.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param id: Required. a unique id
+ :type id: str
+ :param access_policy:
+ :type access_policy: ~azure.storage.blob.models.AccessPolicy
+ """
+
+ _validation = {
+ 'id': {'required': True},
+ }
+
+ _attribute_map = {
+ 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}},
+ 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}},
+ }
+ _xml_map = {
+ 'name': 'SignedIdentifier'
+ }
+
+ def __init__(self, *, id: str, access_policy=None, **kwargs) -> None:
+ super(SignedIdentifier, self).__init__(**kwargs)
+ self.id = id
+ self.access_policy = access_policy
+
+
+class SourceModifiedAccessConditions(Model):
+ """Additional parameters for a set of operations.
+
+ :param source_if_modified_since: Specify this header value to operate only
+ on a blob if it has been modified since the specified date/time.
+ :type source_if_modified_since: datetime
+ :param source_if_unmodified_since: Specify this header value to operate
+ only on a blob if it has not been modified since the specified date/time.
+ :type source_if_unmodified_since: datetime
+ :param source_if_match: Specify an ETag value to operate only on blobs
+ with a matching value.
+ :type source_if_match: str
+ :param source_if_none_match: Specify an ETag value to operate only on
+ blobs without a matching value.
+ :type source_if_none_match: str
+ :param source_if_tags: Specify a SQL where clause on blob tags to operate
+ only on blobs with a matching value.
+ :type source_if_tags: str
+ """
+
+ _attribute_map = {
+ 'source_if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_modified_since'}},
+ 'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_unmodified_since'}},
+ 'source_if_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_match'}},
+ 'source_if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_none_match'}},
+ 'source_if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_tags'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, source_if_modified_since=None, source_if_unmodified_since=None, source_if_match: str=None, source_if_none_match: str=None, source_if_tags: str=None, **kwargs) -> None:
+ super(SourceModifiedAccessConditions, self).__init__(**kwargs)
+ self.source_if_modified_since = source_if_modified_since
+ self.source_if_unmodified_since = source_if_unmodified_since
+ self.source_if_match = source_if_match
+ self.source_if_none_match = source_if_none_match
+ self.source_if_tags = source_if_tags
+
+
+class StaticWebsite(Model):
+ """The properties that enable an account to host a static website.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param enabled: Required. Indicates whether this account is hosting a
+ static website
+ :type enabled: bool
+ :param index_document: The default name of the index page under each
+ directory
+ :type index_document: str
+ :param error_document404_path: The absolute path of the custom 404 page
+ :type error_document404_path: str
+ :param default_index_document_path: Absolute path of the default index
+ page
+ :type default_index_document_path: str
+ """
+
+ _validation = {
+ 'enabled': {'required': True},
+ }
+
+ _attribute_map = {
+ 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
+ 'index_document': {'key': 'IndexDocument', 'type': 'str', 'xml': {'name': 'IndexDocument'}},
+ 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str', 'xml': {'name': 'ErrorDocument404Path'}},
+ 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str', 'xml': {'name': 'DefaultIndexDocumentPath'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, enabled: bool, index_document: str=None, error_document404_path: str=None, default_index_document_path: str=None, **kwargs) -> None:
+ super(StaticWebsite, self).__init__(**kwargs)
+ self.enabled = enabled
+ self.index_document = index_document
+ self.error_document404_path = error_document404_path
+ self.default_index_document_path = default_index_document_path
+
+
+class StorageError(Model):
+ """StorageError.
+
+ :param message:
+ :type message: str
+ """
+
+ _attribute_map = {
+ 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, message: str=None, **kwargs) -> None:
+ super(StorageError, self).__init__(**kwargs)
+ self.message = message
+
+
+class StorageErrorException(HttpResponseError):
+ """Server responsed with exception of type: 'StorageError'.
+
+ :param deserialize: A deserializer
+ :param response: Server response to be deserialized.
+ """
+
+ def __init__(self, response, deserialize, *args):
+
+ model_name = 'StorageError'
+ self.error = deserialize(model_name, response)
+ if self.error is None:
+ self.error = deserialize.dependencies[model_name]()
+ super(StorageErrorException, self).__init__(response=response)
+
+
+class StorageServiceProperties(Model):
+ """Storage Service Properties.
+
+ :param logging:
+ :type logging: ~azure.storage.blob.models.Logging
+ :param hour_metrics:
+ :type hour_metrics: ~azure.storage.blob.models.Metrics
+ :param minute_metrics:
+ :type minute_metrics: ~azure.storage.blob.models.Metrics
+ :param cors: The set of CORS rules.
+ :type cors: list[~azure.storage.blob.models.CorsRule]
+ :param default_service_version: The default version to use for requests to
+ the Blob service if an incoming request's version is not specified.
+ Possible values include version 2008-10-27 and all more recent versions
+ :type default_service_version: str
+ :param delete_retention_policy:
+ :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy
+ :param static_website:
+ :type static_website: ~azure.storage.blob.models.StaticWebsite
+ """
+
+ _attribute_map = {
+ 'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}},
+ 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
+ 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
+ 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
+ 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str', 'xml': {'name': 'DefaultServiceVersion'}},
+ 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'DeleteRetentionPolicy'}},
+ 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite', 'xml': {'name': 'StaticWebsite'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, logging=None, hour_metrics=None, minute_metrics=None, cors=None, default_service_version: str=None, delete_retention_policy=None, static_website=None, **kwargs) -> None:
+ super(StorageServiceProperties, self).__init__(**kwargs)
+ self.logging = logging
+ self.hour_metrics = hour_metrics
+ self.minute_metrics = minute_metrics
+ self.cors = cors
+ self.default_service_version = default_service_version
+ self.delete_retention_policy = delete_retention_policy
+ self.static_website = static_website
+
+
+class StorageServiceStats(Model):
+ """Stats for the storage service.
+
+ :param geo_replication:
+ :type geo_replication: ~azure.storage.blob.models.GeoReplication
+ """
+
+ _attribute_map = {
+ 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, geo_replication=None, **kwargs) -> None:
+ super(StorageServiceStats, self).__init__(**kwargs)
+ self.geo_replication = geo_replication
+
+
+class UserDelegationKey(Model):
+ """A user delegation key.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param signed_oid: Required. The Azure Active Directory object ID in GUID
+ format.
+ :type signed_oid: str
+ :param signed_tid: Required. The Azure Active Directory tenant ID in GUID
+ format
+ :type signed_tid: str
+ :param signed_start: Required. The date-time the key is active
+ :type signed_start: datetime
+ :param signed_expiry: Required. The date-time the key expires
+ :type signed_expiry: datetime
+ :param signed_service: Required. Abbreviation of the Azure Storage service
+ that accepts the key
+ :type signed_service: str
+ :param signed_version: Required. The service version that created the key
+ :type signed_version: str
+ :param value: Required. The key as a base64 string
+ :type value: str
+ """
+
+ _validation = {
+ 'signed_oid': {'required': True},
+ 'signed_tid': {'required': True},
+ 'signed_start': {'required': True},
+ 'signed_expiry': {'required': True},
+ 'signed_service': {'required': True},
+ 'signed_version': {'required': True},
+ 'value': {'required': True},
+ }
+
+ _attribute_map = {
+ 'signed_oid': {'key': 'SignedOid', 'type': 'str', 'xml': {'name': 'SignedOid'}},
+ 'signed_tid': {'key': 'SignedTid', 'type': 'str', 'xml': {'name': 'SignedTid'}},
+ 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601', 'xml': {'name': 'SignedStart'}},
+ 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601', 'xml': {'name': 'SignedExpiry'}},
+ 'signed_service': {'key': 'SignedService', 'type': 'str', 'xml': {'name': 'SignedService'}},
+ 'signed_version': {'key': 'SignedVersion', 'type': 'str', 'xml': {'name': 'SignedVersion'}},
+ 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}},
+ }
+ _xml_map = {
+ }
+
+ def __init__(self, *, signed_oid: str, signed_tid: str, signed_start, signed_expiry, signed_service: str, signed_version: str, value: str, **kwargs) -> None:
+ super(UserDelegationKey, self).__init__(**kwargs)
+ self.signed_oid = signed_oid
+ self.signed_tid = signed_tid
+ self.signed_start = signed_start
+ self.signed_expiry = signed_expiry
+ self.signed_service = signed_service
+ self.signed_version = signed_version
+ self.value = value
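+
+
+# A minimal illustrative sketch (an assumption, not part of the generated
+# code): these models are serialized to and from the XML bodies of the Blob
+# service. For example, a service-properties payload that enables a 5-day
+# blob soft-delete retention policy could be assembled roughly like this:
+#
+#     props = StorageServiceProperties(
+#         delete_retention_policy=RetentionPolicy(enabled=True, days=5),
+#         static_website=StaticWebsite(enabled=False))
+#
+# and then sent via the Set Blob Service Properties operation on the generated
+# ServiceOperations class (exact method name assumed to be set_properties).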
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/__init__.py
new file mode 100644
index 00000000000..1ea04533440
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/__init__.py
@@ -0,0 +1,28 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from ._service_operations import ServiceOperations
+from ._container_operations import ContainerOperations
+from ._directory_operations import DirectoryOperations
+from ._blob_operations import BlobOperations
+from ._page_blob_operations import PageBlobOperations
+from ._append_blob_operations import AppendBlobOperations
+from ._block_blob_operations import BlockBlobOperations
+
+__all__ = [
+ 'ServiceOperations',
+ 'ContainerOperations',
+ 'DirectoryOperations',
+ 'BlobOperations',
+ 'PageBlobOperations',
+ 'AppendBlobOperations',
+ 'BlockBlobOperations',
+]
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_append_blob_operations.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_append_blob_operations.py
new file mode 100644
index 00000000000..000810acffe
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_append_blob_operations.py
@@ -0,0 +1,694 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class AppendBlobOperations(object):
+ """AppendBlobOperations operations.
+
+    You should not instantiate this class directly. Instead, create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "AppendBlob".
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer):
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+ self.x_ms_blob_type = "AppendBlob"
+
+ def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Create Append Blob operation creates a new append blob.
+
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param blob_tags_string: Optional. Used to set blob tags in various
+ blob operations.
+ :type blob_tags_string: str
+ :param blob_http_headers: Additional parameters for the operation
+ :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ blob_content_type = None
+ if blob_http_headers is not None:
+ blob_content_type = blob_http_headers.blob_content_type
+ blob_content_encoding = None
+ if blob_http_headers is not None:
+ blob_content_encoding = blob_http_headers.blob_content_encoding
+ blob_content_language = None
+ if blob_http_headers is not None:
+ blob_content_language = blob_http_headers.blob_content_language
+ blob_content_md5 = None
+ if blob_http_headers is not None:
+ blob_content_md5 = blob_http_headers.blob_content_md5
+ blob_cache_control = None
+ if blob_http_headers is not None:
+ blob_cache_control = blob_http_headers.blob_cache_control
+ blob_content_disposition = None
+ if blob_http_headers is not None:
+ blob_content_disposition = blob_http_headers.blob_content_disposition
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ # Construct URL
+ url = self.create.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if blob_tags_string is not None:
+ header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str')
+ header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str')
+ if blob_content_type is not None:
+ header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
+ if blob_content_encoding is not None:
+ header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
+ if blob_content_language is not None:
+ header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
+ if blob_content_md5 is not None:
+ header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
+ if blob_cache_control is not None:
+ header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
+ if blob_content_disposition is not None:
+ header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ create.metadata = {'url': '/{containerName}/{blob}'}
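+
+    # Illustrative usage sketch (assumed names, not generated code): with
+    # `client` being the generated service client and `append_blob` the
+    # attribute this class is attached as, an empty append blob could be
+    # created with something like:
+    #
+    #     client.append_blob.create(content_length=0, timeout=30)
+    #
+    # Content is then added with append_block / append_block_from_url below.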
+
+ def append_block(self, body, content_length, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, lease_access_conditions=None, append_position_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Append Block operation commits a new block of data to the end of an
+ existing append blob. The Append Block operation is permitted only if
+ the blob was created with x-ms-blob-type set to AppendBlob. Append
+        Block is supported only on version 2015-02-21 or later.
+
+ :param body: Initial data
+ :type body: Generator
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param transactional_content_md5: Specify the transactional md5 for
+ the body, to be validated by the service.
+ :type transactional_content_md5: bytearray
+ :param transactional_content_crc64: Specify the transactional crc64
+ for the body, to be validated by the service.
+ :type transactional_content_crc64: bytearray
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param append_position_access_conditions: Additional parameters for
+ the operation
+ :type append_position_access_conditions:
+ ~azure.storage.blob.models.AppendPositionAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ max_size = None
+ if append_position_access_conditions is not None:
+ max_size = append_position_access_conditions.max_size
+ append_position = None
+ if append_position_access_conditions is not None:
+ append_position = append_position_access_conditions.append_position
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "appendblock"
+
+ # Construct URL
+ url = self.append_block.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/octet-stream'
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if transactional_content_md5 is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
+ if transactional_content_crc64 is not None:
+ header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if max_size is not None:
+ header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long')
+ if append_position is not None:
+ header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct body
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')),
+ 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ append_block.metadata = {'url': '/{containerName}/{blob}'}
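+
+    # Illustrative usage sketch (assumed client/attribute names): appending a
+    # small payload to an existing append blob might look like:
+    #
+    #     data = b"hello"
+    #     client.append_blob.append_block(body=iter([data]),
+    #                                     content_length=len(data))
+    #
+    # Content-Length must match the uploaded bytes; the optional transactional
+    # MD5/CRC64 values are validated by the service when supplied.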
+
+ def append_block_from_url(self, source_url, content_length, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, transactional_content_md5=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, append_position_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs):
+ """The Append Block operation commits a new block of data to the end of an
+ existing append blob where the contents are read from a source url. The
+ Append Block operation is permitted only if the blob was created with
+ x-ms-blob-type set to AppendBlob. Append Block is supported only on
+        version 2015-02-21 or later.
+
+ :param source_url: Specify a URL to the copy source.
+ :type source_url: str
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param source_range: Bytes of source data in the specified range.
+ :type source_range: str
+ :param source_content_md5: Specify the md5 calculated for the range of
+ bytes that must be read from the copy source.
+ :type source_content_md5: bytearray
+ :param source_contentcrc64: Specify the crc64 calculated for the range
+ of bytes that must be read from the copy source.
+ :type source_contentcrc64: bytearray
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param transactional_content_md5: Specify the transactional md5 for
+ the body, to be validated by the service.
+ :type transactional_content_md5: bytearray
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param append_position_access_conditions: Additional parameters for
+ the operation
+ :type append_position_access_conditions:
+ ~azure.storage.blob.models.AppendPositionAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param source_modified_access_conditions: Additional parameters for
+ the operation
+ :type source_modified_access_conditions:
+ ~azure.storage.blob.models.SourceModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ max_size = None
+ if append_position_access_conditions is not None:
+ max_size = append_position_access_conditions.max_size
+ append_position = None
+ if append_position_access_conditions is not None:
+ append_position = append_position_access_conditions.append_position
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+ source_if_modified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ source_if_unmodified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ source_if_match = None
+ if source_modified_access_conditions is not None:
+ source_if_match = source_modified_access_conditions.source_if_match
+ source_if_none_match = None
+ if source_modified_access_conditions is not None:
+ source_if_none_match = source_modified_access_conditions.source_if_none_match
+
+ comp = "appendblock"
+
+ # Construct URL
+ url = self.append_block_from_url.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str')
+ if source_range is not None:
+ header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str')
+ if source_content_md5 is not None:
+ header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
+ if source_contentcrc64 is not None:
+ header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray')
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if transactional_content_md5 is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if max_size is not None:
+ header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long')
+ if append_position is not None:
+ header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+ if source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
+ if source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
+ if source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
+ if source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')),
+ 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ append_block_from_url.metadata = {'url': '/{containerName}/{blob}'}
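+
+    # Illustrative usage sketch (assumed names and a placeholder source URL):
+    # committing the first 512 bytes of another blob to the end of an append
+    # blob, without routing the data through the caller, might look like:
+    #
+    #     client.append_blob.append_block_from_url(
+    #         source_url="https://account.blob.core.windows.net/container/src",
+    #         content_length=0,
+    #         source_range="bytes=0-511")
+    #
+    # content_length is 0 because the request itself carries no body; the data
+    # is read server-side from source_url.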
+
+ def seal(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, append_position_access_conditions=None, cls=None, **kwargs):
+ """The Seal operation seals the Append Blob to make it read-only. Seal is
+        supported only on version 2019-12-12 or later.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param append_position_access_conditions: Additional parameters for
+ the operation
+ :type append_position_access_conditions:
+ ~azure.storage.blob.models.AppendPositionAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ append_position = None
+ if append_position_access_conditions is not None:
+ append_position = append_position_access_conditions.append_position
+
+ comp = "seal"
+
+ # Construct URL
+ url = self.seal.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if append_position is not None:
+ header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ seal.metadata = {'url': '/{containerName}/{blob}'}
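+
+    # Illustrative usage sketch (assumed client/attribute names): once all
+    # blocks are committed, the blob can be made read-only:
+    #
+    #     client.append_blob.seal()
+    #
+    # After sealing, further append_block calls are rejected by the service.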
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_blob_operations.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_blob_operations.py
new file mode 100644
index 00000000000..394a519856a
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_blob_operations.py
@@ -0,0 +1,3065 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class BlobOperations(object):
+ """BlobOperations operations.
+
+    You should not instantiate this class directly. Instead, create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ :ivar x_ms_requires_sync: . Constant value: "true".
+ :ivar x_ms_copy_action: . Constant value: "abort".
+ :ivar restype: . Constant value: "account".
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer):
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+ self.x_ms_requires_sync = "true"
+ self.x_ms_copy_action = "abort"
+ self.restype = "account"
+
+ def download(self, snapshot=None, version_id=None, timeout=None, range=None, range_get_content_md5=None, range_get_content_crc64=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Download operation reads or downloads a blob from the system,
+ including its metadata and properties. You can also call Download to
+ read a snapshot.
+
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param version_id: The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to operate
+ on. It's for service version 2019-10-10 and newer.
+ :type version_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param range: Return only the bytes of the blob in the specified
+ range.
+ :type range: str
+ :param range_get_content_md5: When set to true and specified together
+ with the Range, the service returns the MD5 hash for the range, as
+ long as the range is less than or equal to 4 MB in size.
+ :type range_get_content_md5: bool
+ :param range_get_content_crc64: When set to true and specified
+ together with the Range, the service returns the CRC64 hash for the
+ range, as long as the range is less than or equal to 4 MB in size.
+ :type range_get_content_crc64: bool
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: object or the result of cls(response)
+ :rtype: Generator
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ # Construct URL
+ url = self.download.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if version_id is not None:
+ query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ if range is not None:
+ header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
+ if range_get_content_md5 is not None:
+ header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool')
+ if range_get_content_crc64 is not None:
+ header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 206]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = response.stream_download(self._client._pipeline)
+ header_dict = {
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
+ 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')),
+ 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
+ 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
+ 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
+ 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')),
+ 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
+ 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
+ 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+ 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
+ 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
+ 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
+ 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
+ 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
+ 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
+ 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')),
+ 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')),
+ 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')),
+ 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ if response.status_code == 206:
+ deserialized = response.stream_download(self._client._pipeline)
+ header_dict = {
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
+ 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')),
+ 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
+ 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
+ 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
+ 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')),
+ 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
+ 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
+ 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+ 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
+ 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
+ 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
+ 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
+ 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
+ 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
+ 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')),
+ 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')),
+ 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')),
+ 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ download.metadata = {'url': '/{containerName}/{blob}'}
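+
+    # A minimal usage sketch (an assumption, not part of the generated code):
+    # given a BlobOperations instance `blob_ops` wired up by the generated client,
+    # a ranged download that also asks the service for an MD5 of the range might
+    # look like the following; the operation returns a byte-chunk generator.
+    #
+    #     stream = blob_ops.download(range='bytes=0-1023', range_get_content_md5=True)
+    #     data = b''.join(stream)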
+
+ def get_properties(self, snapshot=None, version_id=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Get Properties operation returns all user-defined metadata,
+ standard HTTP properties, and system properties for the blob. It does
+ not return the content of the blob.
+
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param version_id: The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to operate
+ on. It's for service version 2019-10-10 and newer.
+ :type version_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ # Construct URL
+ url = self.get_properties.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if version_id is not None:
+ query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.head(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-creation-time': self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')),
+ 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
+ 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')),
+ 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')),
+ 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')),
+ 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
+ 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
+ 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+ 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
+ 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
+ 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
+ 'x-ms-incremental-copy': self._deserialize('bool', response.headers.get('x-ms-incremental-copy')),
+ 'x-ms-copy-destination-snapshot': self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')),
+ 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
+ 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
+ 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
+ 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
+ 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
+ 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
+ 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
+ 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-access-tier': self._deserialize('str', response.headers.get('x-ms-access-tier')),
+ 'x-ms-access-tier-inferred': self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')),
+ 'x-ms-archive-status': self._deserialize('str', response.headers.get('x-ms-archive-status')),
+ 'x-ms-access-tier-change-time': self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'x-ms-is-current-version': self._deserialize('bool', response.headers.get('x-ms-is-current-version')),
+ 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')),
+ 'x-ms-expiry-time': self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')),
+ 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')),
+ 'x-ms-rehydrate-priority': self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')),
+ 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ get_properties.metadata = {'url': '/{containerName}/{blob}'}
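+
+    # A minimal usage sketch (assumption: `blob_ops` is a BlobOperations instance
+    # created by the generated client). Since get_properties surfaces its results
+    # only through response headers, a `cls` callback is typically passed to
+    # capture them:
+    #
+    #     props = blob_ops.get_properties(
+    #         cls=lambda response, deserialized, headers: headers)
+    #     print(props.get('Content-Length'), props.get('ETag'))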
+
+ def delete(self, snapshot=None, version_id=None, timeout=None, delete_snapshots=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """If the storage account's soft delete feature is disabled then, when a
+ blob is deleted, it is permanently removed from the storage account. If
+ the storage account's soft delete feature is enabled, then, when a blob
+ is deleted, it is marked for deletion and becomes inaccessible
+ immediately. However, the blob service retains the blob or snapshot for
+ the number of days specified by the DeleteRetentionPolicy section of
+ [Storage service properties] (Set-Blob-Service-Properties.md). After
+ the specified number of days has passed, the blob's data is permanently
+ removed from the storage account. Note that you continue to be charged
+ for the soft-deleted blob's storage until it is permanently removed.
+ Use the List Blobs API and specify the "include=deleted" query
+ parameter to discover which blobs and snapshots have been soft deleted.
+ You can then use the Undelete Blob API to restore a soft-deleted blob.
+        All other operations on a soft-deleted blob or snapshot cause the
+ service to return an HTTP status code of 404 (ResourceNotFound).
+
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param version_id: The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to operate
+ on. It's for service version 2019-10-10 and newer.
+ :type version_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param delete_snapshots: Required if the blob has associated
+ snapshots. Specify one of the following two options: include: Delete
+ the base blob and all of its snapshots. only: Delete only the blob's
+ snapshots and not the blob itself. Possible values include: 'include',
+ 'only'
+ :type delete_snapshots: str or
+ ~azure.storage.blob.models.DeleteSnapshotsOptionType
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ # Construct URL
+ url = self.delete.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if version_id is not None:
+ query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ if delete_snapshots is not None:
+ header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.delete(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ delete.metadata = {'url': '/{containerName}/{blob}'}
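+
+    # A minimal usage sketch (assumption: `blob_ops` comes from the generated
+    # client). Deleting a base blob together with all of its snapshots might
+    # look like:
+    #
+    #     blob_ops.delete(delete_snapshots='include', timeout=30)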
+
+ def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """Set the owner, group, permissions, or access control list for a blob.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param owner: Optional. The owner of the blob or directory.
+ :type owner: str
+ :param group: Optional. The owning group of the blob or directory.
+ :type group: str
+ :param posix_permissions: Optional and only valid if Hierarchical
+ Namespace is enabled for the account. Sets POSIX access permissions
+ for the file owner, the file owning group, and others. Each class may
+ be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
+ 0766) are supported.
+ :type posix_permissions: str
+ :param posix_acl: Sets POSIX access control rights on files and
+ directories. The value is a comma-separated list of access control
+ entries. Each access control entry (ACE) consists of a scope, a type,
+ a user or group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type posix_acl: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`DataLakeStorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ action = "setAccessControl"
+
+ # Construct URL
+ url = self.set_access_control.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if owner is not None:
+ header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
+ if group is not None:
+ header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
+ if posix_permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
+ if posix_acl is not None:
+ header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.patch(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ }
+ return cls(response, None, response_headers)
+ set_access_control.metadata = {'url': '/{filesystem}/{path}'}
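+
+    # A minimal usage sketch (assumption: `blob_ops` comes from the generated
+    # client and the target account has a hierarchical namespace). Setting the
+    # owner and POSIX permissions on a path might look like:
+    #
+    #     blob_ops.set_access_control(owner='<owner-object-id>',
+    #                                 posix_permissions='rwxr-x---')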
+
+ def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """Get the owner, group, permissions, or access control list for a blob.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param upn: Optional. Valid only when Hierarchical Namespace is
+ enabled for the account. If "true", the identity values returned in
+ the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+ transformed from Azure Active Directory Object IDs to User Principal
+ Names. If "false", the values will be returned as Azure Active
+ Directory Object IDs. The default value is false.
+ :type upn: bool
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`DataLakeStorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ action = "getAccessControl"
+
+ # Construct URL
+ url = self.get_access_control.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if upn is not None:
+ query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.head(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')),
+ 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')),
+ 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')),
+ 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ }
+ return cls(response, None, response_headers)
+ get_access_control.metadata = {'url': '/{filesystem}/{path}'}
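+
+    # A minimal usage sketch (assumption: `blob_ops` comes from the generated
+    # client). The ACL comes back in the x-ms-acl response header, so a `cls`
+    # callback is used to surface the headers:
+    #
+    #     acl_headers = blob_ops.get_access_control(
+    #         upn=True, cls=lambda response, deserialized, headers: headers)
+    #     print(acl_headers.get('x-ms-acl'))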
+
+ def rename(self, rename_source, timeout=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs):
+ """Rename a blob/file. By default, the destination is overwritten and if
+ the destination already exists and has a lease the lease is broken.
+ This operation supports conditional HTTP requests. For more
+ information, see [Specifying Conditional Headers for Blob Service
+ Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+ To fail if the destination already exists, use a conditional request
+ with If-None-Match: "*".
+
+ :param rename_source: The file or directory to be renamed. The value
+            must have the following format: "/{filesystem}/{path}". If
+ "x-ms-properties" is specified, the properties will overwrite the
+ existing properties; otherwise, the existing properties will be
+ preserved.
+ :type rename_source: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param path_rename_mode: Determines the behavior of the rename
+ operation. Possible values include: 'legacy', 'posix'
+ :type path_rename_mode: str or
+ ~azure.storage.blob.models.PathRenameMode
+ :param directory_properties: Optional. User-defined properties to be
+ stored with the file or directory, in the format of a comma-separated
+ list of name and value pairs "n1=v1, n2=v2, ...", where each value is
+ base64 encoded.
+ :type directory_properties: str
+ :param posix_permissions: Optional and only valid if Hierarchical
+ Namespace is enabled for the account. Sets POSIX access permissions
+ for the file owner, the file owning group, and others. Each class may
+ be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
+ 0766) are supported.
+ :type posix_permissions: str
+ :param posix_umask: Only valid if Hierarchical Namespace is enabled
+            for the account. This umask restricts permission settings for the
+            file or directory, and is only applied when a default ACL does not
+            exist in the parent directory. If a umask bit is set, the
+            corresponding permission will be disabled; otherwise, the
+            corresponding permission will be determined by the permission. A
+            4-digit octal notation (e.g. 0022) is supported here. If no umask
+            is specified, a default umask of 0027 will be used.
+ :type posix_umask: str
+ :param source_lease_id: A lease ID for the source path. If specified,
+ the source path must have an active lease and the lease ID must match.
+ :type source_lease_id: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param directory_http_headers: Additional parameters for the operation
+ :type directory_http_headers:
+ ~azure.storage.blob.models.DirectoryHttpHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param source_modified_access_conditions: Additional parameters for
+ the operation
+ :type source_modified_access_conditions:
+ ~azure.storage.blob.models.SourceModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`DataLakeStorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ cache_control = None
+ if directory_http_headers is not None:
+ cache_control = directory_http_headers.cache_control
+ content_type = None
+ if directory_http_headers is not None:
+ content_type = directory_http_headers.content_type
+ content_encoding = None
+ if directory_http_headers is not None:
+ content_encoding = directory_http_headers.content_encoding
+ content_language = None
+ if directory_http_headers is not None:
+ content_language = directory_http_headers.content_language
+ content_disposition = None
+ if directory_http_headers is not None:
+ content_disposition = directory_http_headers.content_disposition
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ source_if_modified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ source_if_unmodified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ source_if_match = None
+ if source_modified_access_conditions is not None:
+ source_if_match = source_modified_access_conditions.source_if_match
+ source_if_none_match = None
+ if source_modified_access_conditions is not None:
+ source_if_none_match = source_modified_access_conditions.source_if_none_match
+
+ # Construct URL
+ url = self.rename.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if path_rename_mode is not None:
+ query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str')
+ if directory_properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str')
+ if posix_permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
+ if posix_umask is not None:
+ header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str')
+ if source_lease_id is not None:
+ header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if cache_control is not None:
+ header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
+ if content_type is not None:
+ header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
+ if content_encoding is not None:
+ header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
+ if content_language is not None:
+ header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
+ if content_disposition is not None:
+ header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
+ if source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
+ if source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
+ if source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ }
+ return cls(response, None, response_headers)
+ rename.metadata = {'url': '/{filesystem}/{path}'}
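+
+    # A minimal usage sketch (assumptions: `blob_ops` comes from the generated
+    # client, the source path is illustrative, and the generated models expose
+    # ModifiedAccessConditions with an if_none_match keyword). Failing the rename
+    # when the destination already exists uses a conditional If-None-Match: "*"
+    # request:
+    #
+    #     conditions = models.ModifiedAccessConditions(if_none_match='*')
+    #     blob_ops.rename('/myfilesystem/dir/source.txt',
+    #                     modified_access_conditions=conditions)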
+
+ def undelete(self, timeout=None, request_id=None, cls=None, **kwargs):
+ """Undelete a blob that was previously soft deleted.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "undelete"
+
+ # Construct URL
+ url = self.undelete.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ undelete.metadata = {'url': '/{containerName}/{blob}'}
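+
+    # A minimal usage sketch (assumption: `blob_ops` comes from the generated
+    # client and soft delete is enabled on the account). Restoring a soft-deleted
+    # blob is a single call with no required parameters:
+    #
+    #     blob_ops.undelete()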
+
+ def set_expiry(self, expiry_options, timeout=None, request_id=None, expires_on=None, cls=None, **kwargs):
+ """Sets the time a blob will expire and be deleted.
+
+ :param expiry_options: Required. Indicates mode of the expiry time.
+ Possible values include: 'NeverExpire', 'RelativeToCreation',
+ 'RelativeToNow', 'Absolute'
+ :type expiry_options: str or
+ ~azure.storage.blob.models.BlobExpiryOptions
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param expires_on: The time at which the blob is set to expire
+ :type expires_on: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "expiry"
+
+ # Construct URL
+ url = self.set_expiry.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str')
+ if expires_on is not None:
+ header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_expiry.metadata = {'url': '/{containerName}/{blob}'}
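+
+ # Illustrative usage (hypothetical names): set_expiry sends PUT <blob URL>?comp=expiry
+ # with x-ms-expiry-option and, when given, x-ms-expiry-time (serialized as an
+ # RFC-1123 date). Setting an absolute expiry might look like:
+ #     blob_ops.set_expiry('Absolute', expires_on=expiry_datetime)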
+
+ def set_http_headers(self, timeout=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Set HTTP Headers operation sets system properties on the blob.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param blob_http_headers: Additional parameters for the operation
+ :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ blob_cache_control = None
+ if blob_http_headers is not None:
+ blob_cache_control = blob_http_headers.blob_cache_control
+ blob_content_type = None
+ if blob_http_headers is not None:
+ blob_content_type = blob_http_headers.blob_content_type
+ blob_content_md5 = None
+ if blob_http_headers is not None:
+ blob_content_md5 = blob_http_headers.blob_content_md5
+ blob_content_encoding = None
+ if blob_http_headers is not None:
+ blob_content_encoding = blob_http_headers.blob_content_encoding
+ blob_content_language = None
+ if blob_http_headers is not None:
+ blob_content_language = blob_http_headers.blob_content_language
+ blob_content_disposition = None
+ if blob_http_headers is not None:
+ blob_content_disposition = blob_http_headers.blob_content_disposition
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "properties"
+
+ # Construct URL
+ url = self.set_http_headers.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if blob_cache_control is not None:
+ header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
+ if blob_content_type is not None:
+ header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
+ if blob_content_md5 is not None:
+ header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
+ if blob_content_encoding is not None:
+ header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
+ if blob_content_language is not None:
+ header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
+ if blob_content_disposition is not None:
+ header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_http_headers.metadata = {'url': '/{containerName}/{blob}'}
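+
+ # Illustrative usage (assuming the generated BlobHTTPHeaders model accepts these
+ # keyword arguments): the fields of blob_http_headers are fanned out into the
+ # x-ms-blob-* headers above and sent with PUT <blob URL>?comp=properties:
+ #     headers = models.BlobHTTPHeaders(blob_content_type='application/json')
+ #     blob_ops.set_http_headers(blob_http_headers=headers)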
+
+ def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Set Blob Metadata operation sets user-defined metadata for the
+ specified blob as one or more name-value pairs.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "metadata"
+
+ # Construct URL
+ url = self.set_metadata.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_metadata.metadata = {'url': '/{containerName}/{blob}'}
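+
+ # Illustrative usage (hypothetical caller): in this generated signature, metadata
+ # is a single pre-serialized string carried in the x-ms-meta header of
+ # PUT <blob URL>?comp=metadata, replacing any existing blob metadata:
+ #     blob_ops.set_metadata(metadata=serialized_metadata, timeout=30)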
+
+ def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
+ """[Update] The Lease Blob operation establishes and manages a lock on a
+ blob for write and delete operations.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param duration: Specifies the duration of the lease, in seconds, or
+ negative one (-1) for a lease that never expires. A non-infinite lease
+ can be between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change.
+ :type duration: int
+ :param proposed_lease_id: Proposed lease ID, in a GUID string format.
+ The Blob service returns 400 (Invalid request) if the proposed lease
+ ID is not in the correct format. See Guid Constructor (String) for a
+ list of valid GUID string formats.
+ :type proposed_lease_id: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "lease"
+ action = "acquire"
+
+ # Construct URL
+ url = self.acquire_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if duration is not None:
+ header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int')
+ if proposed_lease_id is not None:
+ header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ acquire_lease.metadata = {'url': '/{containerName}/{blob}'}
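+
+ # Lease lifecycle sketch (illustrative; `blob_ops` is assumed): acquire_lease
+ # issues PUT <blob URL>?comp=lease with x-ms-lease-action: acquire and, on 201,
+ # returns the granted lease ID in the x-ms-lease-id response header:
+ #     blob_ops.acquire_lease(duration=15, proposed_lease_id=str(uuid.uuid4()))
+ # The release/renew/change/break methods below reuse the same comp=lease request,
+ # varying only x-ms-lease-action and the lease headers they require.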
+
+ def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
+ """[Update] The Lease Blob operation establishes and manages a lock on a
+ blob for write and delete operations.
+
+ :param lease_id: Specifies the current lease ID on the resource.
+ :type lease_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "lease"
+ action = "release"
+
+ # Construct URL
+ url = self.release_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ release_lease.metadata = {'url': '/{containerName}/{blob}'}
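+
+ # Illustrative usage: release_lease requires the active lease ID, sends
+ # x-ms-lease-action: release, and expects 200:
+ #     blob_ops.release_lease(lease_id=current_lease_id)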
+
+ def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
+ """[Update] The Lease Blob operation establishes and manages a lock on a
+ blob for write and delete operations.
+
+ :param lease_id: Specifies the current lease ID on the resource.
+ :type lease_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "lease"
+ action = "renew"
+
+ # Construct URL
+ url = self.renew_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ renew_lease.metadata = {'url': '/{containerName}/{blob}'}
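+
+ # Illustrative usage: renew_lease sends x-ms-lease-action: renew for the given
+ # lease ID; the renewed lease ID is echoed back in x-ms-lease-id:
+ #     blob_ops.renew_lease(lease_id=current_lease_id)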
+
+ def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
+ """[Update] The Lease Blob operation establishes and manages a lock on a
+ blob for write and delete operations.
+
+ :param lease_id: Specifies the current lease ID on the resource.
+ :type lease_id: str
+ :param proposed_lease_id: Proposed lease ID, in a GUID string format.
+ The Blob service returns 400 (Invalid request) if the proposed lease
+ ID is not in the correct format. See Guid Constructor (String) for a
+ list of valid GUID string formats.
+ :type proposed_lease_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "lease"
+ action = "change"
+
+ # Construct URL
+ url = self.change_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ change_lease.metadata = {'url': '/{containerName}/{blob}'}
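+
+ # Illustrative usage: change_lease swaps the active lease ID for a proposed one
+ # by sending both x-ms-lease-id and x-ms-proposed-lease-id with
+ # x-ms-lease-action: change:
+ #     blob_ops.change_lease(lease_id=current_lease_id, proposed_lease_id=new_lease_id)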
+
+ def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
+ """[Update] The Lease Blob operation establishes and manages a lock on a
+ blob for write and delete operations.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param break_period: For a break operation, proposed duration the
+ lease should continue before it is broken, in seconds, between 0 and
+ 60. This break period is only used if it is shorter than the time
+ remaining on the lease. If longer, the time remaining on the lease is
+ used. A new lease will not be available before the break period has
+ expired, but the lease may be held for longer than the break period.
+ If this header does not appear with a break operation, a
+ fixed-duration lease breaks after the remaining lease period elapses,
+ and an infinite lease breaks immediately.
+ :type break_period: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "lease"
+ action = "break"
+
+ # Construct URL
+ url = self.break_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if break_period is not None:
+ header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ break_lease.metadata = {'url': '/{containerName}/{blob}'}
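+
+ # Illustrative usage: break_lease sends x-ms-lease-action: break with an optional
+ # x-ms-lease-break-period; the 202 response carries x-ms-lease-time, the seconds
+ # remaining before the lease is fully broken:
+ #     blob_ops.break_lease(break_period=0)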
+
+ def create_snapshot(self, timeout=None, metadata=None, request_id=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs):
+ """The Create Snapshot operation creates a read-only snapshot of a blob.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+
+ comp = "snapshot"
+
+ # Construct URL
+ url = self.create_snapshot.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ create_snapshot.metadata = {'url': '/{containerName}/{blob}'}
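+
+ # Illustrative usage (hypothetical caller): create_snapshot issues
+ # PUT <blob URL>?comp=snapshot; on 201 the snapshot's timestamp identifier is
+ # returned in the x-ms-snapshot response header:
+ #     blob_ops.create_snapshot(metadata=serialized_metadata)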
+
+ def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, blob_tags_string=None, seal_blob=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs):
+ """The Start Copy From URL operation copies a blob or an internet resource
+ to a new blob.
+
+ :param copy_source: Specifies the name of the source page blob
+ snapshot. This value is a URL of up to 2 KB in length that specifies a
+ page blob snapshot. The value should be URL-encoded as it would appear
+ in a request URI. The source blob must either be public or must be
+ authenticated via a shared access signature.
+ :type copy_source: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param tier: Optional. Indicates the tier to be set on the blob.
+ Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
+ 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
+ :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+ :param rehydrate_priority: Optional. Indicates the priority with which
+ to rehydrate an archived blob. Possible values include: 'High',
+ 'Standard'
+ :type rehydrate_priority: str or
+ ~azure.storage.blob.models.RehydratePriority
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param blob_tags_string: Optional. Used to set blob tags in various
+ blob operations.
+ :type blob_tags_string: str
+ :param seal_blob: Overrides the sealed state of the destination blob.
+ Service version 2019-12-12 and newer.
+ :type seal_blob: bool
+ :param source_modified_access_conditions: Additional parameters for
+ the operation
+ :type source_modified_access_conditions:
+ ~azure.storage.blob.models.SourceModifiedAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ source_if_modified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ source_if_unmodified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ source_if_match = None
+ if source_modified_access_conditions is not None:
+ source_if_match = source_modified_access_conditions.source_if_match
+ source_if_none_match = None
+ if source_modified_access_conditions is not None:
+ source_if_none_match = source_modified_access_conditions.source_if_none_match
+ source_if_tags = None
+ if source_modified_access_conditions is not None:
+ source_if_tags = source_modified_access_conditions.source_if_tags
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+
+ # Construct URL
+ url = self.start_copy_from_url.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ if tier is not None:
+ header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
+ if rehydrate_priority is not None:
+ header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str')
+ header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if blob_tags_string is not None:
+ header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str')
+ if seal_blob is not None:
+ header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool')
+ if source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
+ if source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
+ if source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
+ if source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
+ if source_if_tags is not None:
+ header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", source_if_tags, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+ 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'}
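+ # Illustrative usage sketch (not part of the generated client). It assumes a
+ # `blob_ops` instance of this operations class wired to a pipeline, and a
+ # SAS-authenticated source URL; the `cls` callback below simply surfaces the
+ # raw response headers so the copy id and status can be polled later:
+ #
+ #     headers = blob_ops.start_copy_from_url(
+ #         copy_source="https://src.blob.core.windows.net/cont/blob?<sas>",
+ #         cls=lambda resp, body, hdrs: hdrs)
+ #     copy_id, status = headers['x-ms-copy-id'], headers['x-ms-copy-status']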
+
+ def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, blob_tags_string=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs):
+ """The Copy From URL operation copies a blob or an internet resource to a
+ new blob. It will not return a response until the copy is complete.
+
+ :param copy_source: Specifies the URL of the source blob or internet
+ resource to copy. The value may be a URL of up to 2 KB in length. The
+ value should be URL-encoded as it would appear in a request URI. The
+ source must either be public or must be authenticated via a shared
+ access signature.
+ :type copy_source: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param tier: Optional. Indicates the tier to be set on the blob.
+ Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
+ 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
+ :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param source_content_md5: Specify the md5 calculated for the range of
+ bytes that must be read from the copy source.
+ :type source_content_md5: bytearray
+ :param blob_tags_string: Optional. Used to set blob tags in various
+ blob operations.
+ :type blob_tags_string: str
+ :param source_modified_access_conditions: Additional parameters for
+ the operation
+ :type source_modified_access_conditions:
+ ~azure.storage.blob.models.SourceModifiedAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ source_if_modified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ source_if_unmodified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ source_if_match = None
+ if source_modified_access_conditions is not None:
+ source_if_match = source_modified_access_conditions.source_if_match
+ source_if_none_match = None
+ if source_modified_access_conditions is not None:
+ source_if_none_match = source_modified_access_conditions.source_if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+
+ # Construct URL
+ url = self.copy_from_url.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ if tier is not None:
+ header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
+ header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if source_content_md5 is not None:
+ header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
+ if blob_tags_string is not None:
+ header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str')
+ header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str')
+ if source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
+ if source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
+ if source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
+ if source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+ 'x-ms-copy-status': self._deserialize(models.SyncCopyStatusType, response.headers.get('x-ms-copy-status')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ copy_from_url.metadata = {'url': '/{containerName}/{blob}'}
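+ # Illustrative sketch only (assumed `blob_ops` instance as above). Unlike
+ # start_copy_from_url, this variant sends x-ms-requires-sync and returns
+ # only after the copy has completed, so no status polling is needed:
+ #
+ #     blob_ops.copy_from_url(
+ #         copy_source="https://src.blob.core.windows.net/cont/blob?<sas>",
+ #         blob_tags_string="project=demo")   # optional x-ms-tags header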
+
+ def abort_copy_from_url(self, copy_id, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs):
+ """The Abort Copy From URL operation aborts a pending Copy From URL
+ operation, and leaves a destination blob with zero length and full
+ metadata.
+
+ :param copy_id: The copy identifier provided in the x-ms-copy-id
+ header of the original Copy Blob operation.
+ :type copy_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+
+ comp = "copy"
+
+ # Construct URL
+ url = self.abort_copy_from_url.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'}
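+ # Illustrative sketch (assumed `blob_ops` instance and a `copy_id` captured
+ # from an earlier start_copy_from_url call). Aborting sends x-ms-copy-action
+ # and leaves a zero-length destination blob with full metadata:
+ #
+ #     blob_ops.abort_copy_from_url(copy_id=copy_id)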
+
+ def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Set Tier operation sets the tier on a blob. The operation is
+ allowed on a page blob in a premium storage account and on a block blob
+ in a blob storage account (locally redundant storage only). A premium
+ page blob's tier determines the allowed size, IOPS, and bandwidth of
+ the blob. A block blob's tier determines Hot/Cool/Archive storage type.
+ This operation does not update the blob's ETag.
+
+ :param tier: Indicates the tier to be set on the blob. Possible values
+ include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', 'P40', 'P50', 'P60',
+ 'P70', 'P80', 'Hot', 'Cool', 'Archive'
+ :type tier: str or ~azure.storage.blob.models.AccessTierRequired
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param version_id: The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to operate
+ on. It's for service version 2019-10-10 and newer.
+ :type version_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param rehydrate_priority: Optional: Indicates the priority with which
+ to rehydrate an archived blob. Possible values include: 'High',
+ 'Standard'
+ :type rehydrate_priority: str or
+ ~azure.storage.blob.models.RehydratePriority
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "tier"
+
+ # Construct URL
+ url = self.set_tier.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if version_id is not None:
+ query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
+ if rehydrate_priority is not None:
+ header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_tier.metadata = {'url': '/{containerName}/{blob}'}
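+ # Illustrative sketch (assumed `blob_ops` instance). Archiving and later
+ # rehydrating a block blob; rehydrate_priority is only meaningful when
+ # moving a blob out of the Archive tier:
+ #
+ #     blob_ops.set_tier(tier='Archive')
+ #     blob_ops.set_tier(tier='Hot', rehydrate_priority='High')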
+
+ def get_account_info(self, cls=None, **kwargs):
+ """Returns the sku name and account kind .
+
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "properties"
+
+ # Construct URL
+ url = self.get_account_info.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')),
+ 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ get_account_info.metadata = {'url': '/{containerName}/{blob}'}
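+ # Illustrative sketch (assumed `blob_ops` instance). The SKU name and account
+ # kind come back as response headers, so a `cls` callback is the simplest way
+ # to read them:
+ #
+ #     info = blob_ops.get_account_info(cls=lambda resp, body, hdrs: hdrs)
+ #     sku, kind = info['x-ms-sku-name'], info['x-ms-account-kind']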
+
+ def query(self, query_request=None, snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Query operation enables users to select/project on blob data by
+ providing simple query expressions.
+
+ :param query_request: the query request
+ :type query_request: ~azure.storage.blob.models.QueryRequest
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: object or the result of cls(response)
+ :rtype: Generator
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "query"
+
+ # Construct URL
+ url = self.query.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct body
+ if query_request is not None:
+ body_content = self._serialize.body(query_request, 'QueryRequest')
+ else:
+ body_content = None
+
+ # Construct and send request
+ request = self._client.post(url, query_parameters, header_parameters, body_content)
+ pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 206]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = response.stream_download(self._client._pipeline)
+ header_dict = {
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
+ 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
+ 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
+ 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')),
+ 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
+ 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
+ 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+ 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
+ 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
+ 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
+ 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
+ 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
+ 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
+ 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ if response.status_code == 206:
+ deserialized = response.stream_download(self._client._pipeline)
+ header_dict = {
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')),
+ 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')),
+ 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')),
+ 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')),
+ 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')),
+ 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')),
+ 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+ 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')),
+ 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')),
+ 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
+ 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
+ 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
+ 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')),
+ 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ query.metadata = {'url': '/{containerName}/{blob}'}
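+ # Illustrative sketch (assumed `blob_ops` instance and a `models.QueryRequest`
+ # built elsewhere with the desired query expression). The call streams the
+ # filtered blob content back rather than returning it in one buffer:
+ #
+ #     stream = blob_ops.query(query_request=my_query_request)
+ #     for chunk in stream:
+ #         process(chunk)   # `process` is a placeholder for caller logic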
+
+ def get_tags(self, timeout=None, request_id=None, snapshot=None, version_id=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Get Tags operation enables users to get the tags associated with a
+ blob.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param version_id: The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to operate
+ on. It's for service version 2019-10-10 and newer.
+ :type version_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: BlobTags or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.BlobTags
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "tags"
+
+ # Construct URL
+ url = self.get_tags.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if version_id is not None:
+ query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('BlobTags', response)
+ header_dict = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_tags.metadata = {'url': '/{containerName}/{blob}'}
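+ # Illustrative sketch (assumed `blob_ops` instance; the blob_tag_set, key and
+ # value attribute names are assumptions about the generated BlobTags model):
+ #
+ #     tags = blob_ops.get_tags()
+ #     for tag in tags.blob_tag_set:
+ #         print(tag.key, tag.value)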
+
+ def set_tags(self, timeout=None, version_id=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, tags=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Set Tags operation enables users to set tags on a blob.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param version_id: The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to operate
+ on. It's for service version 2019-10-10 and newer.
+ :type version_id: str
+ :param transactional_content_md5: Specify the transactional md5 for
+ the body, to be validated by the service.
+ :type transactional_content_md5: bytearray
+ :param transactional_content_crc64: Specify the transactional crc64
+ for the body, to be validated by the service.
+ :type transactional_content_crc64: bytearray
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param tags: Blob tags
+ :type tags: ~azure.storage.blob.models.BlobTags
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "tags"
+
+ # Construct URL
+ url = self.set_tags.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if version_id is not None:
+ query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if transactional_content_md5 is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
+ if transactional_content_crc64 is not None:
+ header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct body
+ if tags is not None:
+ body_content = self._serialize.body(tags, 'BlobTags')
+ else:
+ body_content = None
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, body_content)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_tags.metadata = {'url': '/{containerName}/{blob}'}
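+ # Illustrative sketch (assumed `blob_ops` instance; the BlobTags/BlobTag
+ # constructor arguments shown are assumptions about the generated models).
+ # The tags are serialized as an XML body and sent with comp=tags:
+ #
+ #     blob_ops.set_tags(tags=models.BlobTags(
+ #         blob_tag_set=[models.BlobTag(key='project', value='demo')]))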
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_block_blob_operations.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_block_blob_operations.py
new file mode 100644
index 00000000000..8228c4782b6
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_block_blob_operations.py
@@ -0,0 +1,833 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class BlockBlobOperations(object):
+ """BlockBlobOperations operations.
+
+ You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "BlockBlob".
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer):
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+ self.x_ms_blob_type = "BlockBlob"
+
+ def upload(self, body, content_length, timeout=None, transactional_content_md5=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Upload Block Blob operation updates the content of an existing
+ block blob. Updating an existing block blob overwrites any existing
+ metadata on the blob. Partial updates are not supported with Put Blob;
+ the content of the existing blob is overwritten with the content of the
+ new blob. To perform a partial update of the content of a block blob,
+ use the Put Block List operation.
+
+ :param body: Initial data
+ :type body: Generator
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param transactional_content_md5: Specify the transactional md5 for
+ the body, to be validated by the service.
+ :type transactional_content_md5: bytearray
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param tier: Optional. Indicates the tier to be set on the blob.
+ Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
+ 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
+ :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param blob_tags_string: Optional. Used to set blob tags in various
+ blob operations.
+ :type blob_tags_string: str
+ :param blob_http_headers: Additional parameters for the operation
+ :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ blob_content_type = None
+ if blob_http_headers is not None:
+ blob_content_type = blob_http_headers.blob_content_type
+ blob_content_encoding = None
+ if blob_http_headers is not None:
+ blob_content_encoding = blob_http_headers.blob_content_encoding
+ blob_content_language = None
+ if blob_http_headers is not None:
+ blob_content_language = blob_http_headers.blob_content_language
+ blob_content_md5 = None
+ if blob_http_headers is not None:
+ blob_content_md5 = blob_http_headers.blob_content_md5
+ blob_cache_control = None
+ if blob_http_headers is not None:
+ blob_cache_control = blob_http_headers.blob_cache_control
+ blob_content_disposition = None
+ if blob_http_headers is not None:
+ blob_content_disposition = blob_http_headers.blob_content_disposition
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ # Construct URL
+ url = self.upload.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/octet-stream'
+ if transactional_content_md5 is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ if tier is not None:
+ header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if blob_tags_string is not None:
+ header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str')
+ header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str')
+ if blob_content_type is not None:
+ header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
+ if blob_content_encoding is not None:
+ header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
+ if blob_content_language is not None:
+ header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
+ if blob_content_md5 is not None:
+ header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
+ if blob_cache_control is not None:
+ header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
+ if blob_content_disposition is not None:
+ header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct body
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ upload.metadata = {'url': '/{containerName}/{blob}'}
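+ # Illustrative sketch (assumed `block_blob_ops` instance of this operations
+ # class). The body is streamed as application/octet-stream and Content-Length
+ # must match the payload size; the call fully overwrites any existing blob:
+ #
+ #     data = b"hello block blob"
+ #     block_blob_ops.upload(body=iter([data]), content_length=len(data))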
+
+ def stage_block(self, block_id, content_length, body, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, cls=None, **kwargs):
+ """The Stage Block operation creates a new block to be committed as part
+ of a blob.
+
+ :param block_id: A valid Base64 string value that identifies the
+ block. Prior to encoding, the string must be less than or equal to 64
+ bytes in size. For a given blob, the length of the value specified for
+ the blockid parameter must be the same size for each block.
+ :type block_id: str
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param body: Initial data
+ :type body: Generator
+ :param transactional_content_md5: Specify the transactional md5 for
+ the body, to be validated by the service.
+ :type transactional_content_md5: bytearray
+ :param transactional_content_crc64: Specify the transactional crc64
+ for the body, to be validated by the service.
+ :type transactional_content_crc64: bytearray
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+
+ comp = "block"
+
+ # Construct URL
+ url = self.stage_block.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/octet-stream'
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if transactional_content_md5 is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
+ if transactional_content_crc64 is not None:
+ header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+
+ # Construct body
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ stage_block.metadata = {'url': '/{containerName}/{blob}'}
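+
+    # Hedged usage sketch (illustrative only, not part of the generated client):
+    # staging one block through this operations object. `block_blob_ops` stands
+    # for an already-constructed, hypothetical instance of this class whose
+    # configured URL points at the target block blob; the block id must be
+    # Base64-encoded before the call, as the docstring above requires.
+    #
+    #   import base64
+    #   data = b"hello block blob"
+    #   block_id = base64.b64encode(b"block-000000").decode("utf-8")
+    #   block_blob_ops.stage_block(
+    #       block_id=block_id,
+    #       content_length=len(data),
+    #       body=iter([data]),  # body is streamed as the request content
+    #   )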
+
+ def stage_block_from_url(self, block_id, content_length, source_url, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs):
+ """The Stage Block operation creates a new block to be committed as part
+ of a blob where the contents are read from a URL.
+
+ :param block_id: A valid Base64 string value that identifies the
+ block. Prior to encoding, the string must be less than or equal to 64
+ bytes in size. For a given blob, the length of the value specified for
+ the blockid parameter must be the same size for each block.
+ :type block_id: str
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param source_url: Specify a URL to the copy source.
+ :type source_url: str
+ :param source_range: Bytes of source data in the specified range.
+ :type source_range: str
+ :param source_content_md5: Specify the md5 calculated for the range of
+ bytes that must be read from the copy source.
+ :type source_content_md5: bytearray
+ :param source_contentcrc64: Specify the crc64 calculated for the range
+ of bytes that must be read from the copy source.
+ :type source_contentcrc64: bytearray
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param source_modified_access_conditions: Additional parameters for
+ the operation
+ :type source_modified_access_conditions:
+ ~azure.storage.blob.models.SourceModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ source_if_modified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ source_if_unmodified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ source_if_match = None
+ if source_modified_access_conditions is not None:
+ source_if_match = source_modified_access_conditions.source_if_match
+ source_if_none_match = None
+ if source_modified_access_conditions is not None:
+ source_if_none_match = source_modified_access_conditions.source_if_none_match
+
+ comp = "block"
+
+ # Construct URL
+ url = self.stage_block_from_url.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str')
+ if source_range is not None:
+ header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str')
+ if source_content_md5 is not None:
+ header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
+ if source_contentcrc64 is not None:
+ header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
+ if source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
+ if source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
+ if source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'}
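+
+    # Hedged usage sketch: staging a block by copying a byte range from a source
+    # URL instead of uploading a request body. `block_blob_ops` and
+    # `source_sas_url` are hypothetical; the range string follows the
+    # x-ms-source-range header format.
+    #
+    #   import base64
+    #   block_id = base64.b64encode(b"block-000001").decode("utf-8")
+    #   block_blob_ops.stage_block_from_url(
+    #       block_id=block_id,
+    #       content_length=0,  # no request body; data is read from the source URL
+    #       source_url=source_sas_url,
+    #       source_range="bytes=0-1048575",
+    #   )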
+
+ def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Commit Block List operation writes a blob by specifying the list of
+ block IDs that make up the blob. In order to be written as part of a
+ blob, a block must have been successfully written to the server in a
+ prior Put Block operation. You can call Put Block List to update a blob
+ by uploading only those blocks that have changed, then committing the
+ new and existing blocks together. You can do this by specifying whether
+ to commit a block from the committed block list or from the uncommitted
+ block list, or to commit the most recently uploaded version of the
+ block, whichever list it may belong to.
+
+ :param blocks:
+ :type blocks: ~azure.storage.blob.models.BlockLookupList
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param transactional_content_md5: Specify the transactional md5 for
+ the body, to be validated by the service.
+ :type transactional_content_md5: bytearray
+ :param transactional_content_crc64: Specify the transactional crc64
+ for the body, to be validated by the service.
+ :type transactional_content_crc64: bytearray
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param tier: Optional. Indicates the tier to be set on the blob.
+ Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
+ 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive'
+ :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param blob_tags_string: Optional. Used to set blob tags in various
+ blob operations.
+ :type blob_tags_string: str
+ :param blob_http_headers: Additional parameters for the operation
+ :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ blob_cache_control = None
+ if blob_http_headers is not None:
+ blob_cache_control = blob_http_headers.blob_cache_control
+ blob_content_type = None
+ if blob_http_headers is not None:
+ blob_content_type = blob_http_headers.blob_content_type
+ blob_content_encoding = None
+ if blob_http_headers is not None:
+ blob_content_encoding = blob_http_headers.blob_content_encoding
+ blob_content_language = None
+ if blob_http_headers is not None:
+ blob_content_language = blob_http_headers.blob_content_language
+ blob_content_md5 = None
+ if blob_http_headers is not None:
+ blob_content_md5 = blob_http_headers.blob_content_md5
+ blob_content_disposition = None
+ if blob_http_headers is not None:
+ blob_content_disposition = blob_http_headers.blob_content_disposition
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "blocklist"
+
+ # Construct URL
+ url = self.commit_block_list.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+ if transactional_content_md5 is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
+ if transactional_content_crc64 is not None:
+ header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ if tier is not None:
+ header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if blob_tags_string is not None:
+ header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str')
+ if blob_cache_control is not None:
+ header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
+ if blob_content_type is not None:
+ header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
+ if blob_content_encoding is not None:
+ header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
+ if blob_content_language is not None:
+ header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
+ if blob_content_md5 is not None:
+ header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
+ if blob_content_disposition is not None:
+ header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct body
+ body_content = self._serialize.body(blocks, 'BlockLookupList')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, body_content)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ commit_block_list.metadata = {'url': '/{containerName}/{blob}'}
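+
+    # Hedged usage sketch: committing previously staged blocks. This assumes the
+    # vendored models expose BlockLookupList with a `latest` attribute, as in the
+    # upstream azure-storage-blob generated models; `block_blob_ops` and
+    # `staged_block_ids` are hypothetical.
+    #
+    #   block_list = models.BlockLookupList(latest=staged_block_ids)
+    #   block_blob_ops.commit_block_list(blocks=block_list)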
+
+ def get_block_list(self, list_type="committed", snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Get Block List operation retrieves the list of blocks that have
+ been uploaded as part of a block blob.
+
+ :param list_type: Specifies whether to return the list of committed
+ blocks, the list of uncommitted blocks, or both lists together.
+ Possible values include: 'committed', 'uncommitted', 'all'
+ :type list_type: str or ~azure.storage.blob.models.BlockListType
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: BlockList or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.BlockList
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "blocklist"
+
+ # Construct URL
+ url = self.get_block_list.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'BlockListType')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('BlockList', response)
+ header_dict = {
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_block_list.metadata = {'url': '/{containerName}/{blob}'}
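+
+    # Hedged usage sketch: inspecting staged vs. committed blocks. The attribute
+    # names on the returned BlockList are assumed from the upstream
+    # azure-storage-blob generated models; `block_blob_ops` is hypothetical.
+    #
+    #   block_list = block_blob_ops.get_block_list(list_type="all")
+    #   committed = block_list.committed_blocks
+    #   uncommitted = block_list.uncommitted_blocks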
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_container_operations.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_container_operations.py
new file mode 100644
index 00000000000..5730483519a
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_container_operations.py
@@ -0,0 +1,1400 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class ContainerOperations(object):
+ """ContainerOperations operations.
+
+    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer):
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+
+ def create(self, timeout=None, metadata=None, access=None, request_id=None, container_cpk_scope_info=None, cls=None, **kwargs):
+ """creates a new container under the specified account. If the container
+ with the same name already exists, the operation fails.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param access: Specifies whether data in the container may be accessed
+ publicly and the level of access. Possible values include:
+ 'container', 'blob'
+ :type access: str or ~azure.storage.blob.models.PublicAccessType
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param container_cpk_scope_info: Additional parameters for the
+ operation
+ :type container_cpk_scope_info:
+ ~azure.storage.blob.models.ContainerCpkScopeInfo
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ default_encryption_scope = None
+ if container_cpk_scope_info is not None:
+ default_encryption_scope = container_cpk_scope_info.default_encryption_scope
+ prevent_encryption_scope_override = None
+ if container_cpk_scope_info is not None:
+ prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override
+
+ restype = "container"
+
+ # Construct URL
+ url = self.create.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ if access is not None:
+ header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if default_encryption_scope is not None:
+ header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", default_encryption_scope, 'str')
+ if prevent_encryption_scope_override is not None:
+ header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", prevent_encryption_scope_override, 'bool')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ create.metadata = {'url': '/{containerName}'}
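+
+    # Hedged usage sketch: creating the container this operations object points
+    # at, with public read access for blobs. `container_ops` is a hypothetical,
+    # already-constructed instance of ContainerOperations.
+    #
+    #   container_ops.create(access="blob")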
+
+ def get_properties(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs):
+ """returns all user-defined metadata and system properties for the
+ specified container. The data returned does not include the container's
+ list of blobs.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+
+ restype = "container"
+
+ # Construct URL
+ url = self.get_properties.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')),
+ 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')),
+ 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')),
+ 'x-ms-has-immutability-policy': self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')),
+ 'x-ms-has-legal-hold': self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')),
+ 'x-ms-default-encryption-scope': self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')),
+ 'x-ms-deny-encryption-scope-override': self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ get_properties.metadata = {'url': '/{containerName}'}
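+
+    # Hedged usage sketch: this operation surfaces its data only through response
+    # headers, so a `cls` callback is the simplest way to read them back.
+    # `container_ops` is hypothetical.
+    #
+    #   headers = container_ops.get_properties(
+    #       cls=lambda response, deserialized, response_headers: response_headers)
+    #   last_modified = headers['Last-Modified']
+    #   public_access = headers['x-ms-blob-public-access']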
+
+ def delete(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """operation marks the specified container for deletion. The container and
+ any blobs contained within it are later deleted during garbage
+ collection.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ restype = "container"
+
+ # Construct URL
+ url = self.delete.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.delete(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ delete.metadata = {'url': '/{containerName}'}
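+
+    # Hedged usage sketch: deleting the container only if it has not changed
+    # since a given time. Assumes the vendored models expose
+    # ModifiedAccessConditions(if_unmodified_since=...), matching how this
+    # method reads the condition above; `container_ops` and the datetime value
+    # are hypothetical.
+    #
+    #   import datetime
+    #   conditions = models.ModifiedAccessConditions(
+    #       if_unmodified_since=datetime.datetime(2021, 5, 1, tzinfo=datetime.timezone.utc))
+    #   container_ops.delete(modified_access_conditions=conditions)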
+
+ def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """operation sets one or more user-defined name-value pairs for the
+ specified container.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+
+ restype = "container"
+ comp = "metadata"
+
+ # Construct URL
+ url = self.set_metadata.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_metadata.metadata = {'url': '/{containerName}'}
+
+ def get_access_policy(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs):
+ """gets the permissions for the specified container. The permissions
+ indicate whether container data may be accessed publicly.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: list or the result of cls(response)
+ :rtype: list[~azure.storage.blob.models.SignedIdentifier]
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+
+ restype = "container"
+ comp = "acl"
+
+ # Construct URL
+ url = self.get_access_policy.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('[SignedIdentifier]', response)
+ header_dict = {
+ 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_access_policy.metadata = {'url': '/{containerName}'}
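+
+    # Hedged usage sketch: reading back the stored access policies set on the
+    # container. `container_ops` is hypothetical.
+    #
+    #   identifiers = container_ops.get_access_policy()
+    #   for identifier in identifiers or []:
+    #       print(identifier.id)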
+
+ def set_access_policy(self, container_acl=None, timeout=None, access=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """sets the permissions for the specified container. The permissions
+ indicate whether blobs in a container may be accessed publicly.
+
+        :param container_acl: the ACLs for the container
+ :type container_acl: list[~azure.storage.blob.models.SignedIdentifier]
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param access: Specifies whether data in the container may be accessed
+ publicly and the level of access. Possible values include:
+ 'container', 'blob'
+ :type access: str or ~azure.storage.blob.models.PublicAccessType
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ restype = "container"
+ comp = "acl"
+
+ # Construct URL
+ url = self.set_access_policy.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+ if access is not None:
+ header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct body
+ serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifiers', 'wrapped': True}}
+ if container_acl is not None:
+ body_content = self._serialize.body(container_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt)
+ else:
+ body_content = None
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, body_content)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_access_policy.metadata = {'url': '/{containerName}'}
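+
+    # Hedged usage sketch: defining a stored access policy. Assumes the vendored
+    # models expose SignedIdentifier(id=..., access_policy=...) and
+    # AccessPolicy(start=..., expiry=..., permission=...), as in the upstream
+    # azure-storage-blob generated models; `container_ops` is hypothetical.
+    #
+    #   policy = models.AccessPolicy(permission="r", expiry="2021-12-31T00:00:00Z")
+    #   identifier = models.SignedIdentifier(id="read-only", access_policy=policy)
+    #   container_ops.set_access_policy(container_acl=[identifier])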
+
+ def restore(self, timeout=None, request_id=None, deleted_container_name=None, deleted_container_version=None, cls=None, **kwargs):
+ """Restores a previously-deleted container.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param deleted_container_name: Optional. Version 2019-12-12 and
+         later. Specifies the name of the deleted container to restore.
+ :type deleted_container_name: str
+ :param deleted_container_version: Optional. Version 2019-12-12 and
+         later. Specifies the version of the deleted container to restore.
+ :type deleted_container_version: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "container"
+ comp = "undelete"
+
+ # Construct URL
+ url = self.restore.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if deleted_container_name is not None:
+ header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str')
+ if deleted_container_version is not None:
+ header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ restore.metadata = {'url': '/{containerName}'}
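+
+    # Usage sketch (illustrative only, not part of the generated code):
+    # assuming `container` is an instance of this operations class, a
+    # soft-deleted container can be restored by passing the name and version
+    # reported by a prior listing of deleted containers (the values below are
+    # placeholders only):
+    #
+    #     container.restore(
+    #         deleted_container_name="mycontainer",
+    #         deleted_container_version="01D60F8BB59A4652",
+    #     )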
+
+ def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
+ """[Update] establishes and manages a lock on a container for delete
+ operations. The lock duration can be 15 to 60 seconds, or can be
+ infinite.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param duration: Specifies the duration of the lease, in seconds, or
+ negative one (-1) for a lease that never expires. A non-infinite lease
+ can be between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change.
+ :type duration: int
+ :param proposed_lease_id: Proposed lease ID, in a GUID string format.
+ The Blob service returns 400 (Invalid request) if the proposed lease
+ ID is not in the correct format. See Guid Constructor (String) for a
+ list of valid GUID string formats.
+ :type proposed_lease_id: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ comp = "lease"
+ restype = "container"
+ action = "acquire"
+
+ # Construct URL
+ url = self.acquire_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if duration is not None:
+ header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int')
+ if proposed_lease_id is not None:
+ header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ acquire_lease.metadata = {'url': '/{containerName}'}
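+
+    # Usage sketch (illustrative only, not part of the generated code):
+    # acquiring a 30-second lease and later releasing it with the lease ID
+    # returned in the "x-ms-lease-id" response header; `container` is assumed
+    # to be an instance of this operations class:
+    #
+    #     headers = container.acquire_lease(
+    #         duration=30,
+    #         cls=lambda response, body, hdrs: hdrs,
+    #     )
+    #     lease_id = headers["x-ms-lease-id"]
+    #     container.release_lease(lease_id=lease_id)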
+
+ def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
+ """[Update] establishes and manages a lock on a container for delete
+ operations. The lock duration can be 15 to 60 seconds, or can be
+ infinite.
+
+ :param lease_id: Specifies the current lease ID on the resource.
+ :type lease_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ comp = "lease"
+ restype = "container"
+ action = "release"
+
+ # Construct URL
+ url = self.release_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ release_lease.metadata = {'url': '/{containerName}'}
+
+ def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
+ """[Update] establishes and manages a lock on a container for delete
+ operations. The lock duration can be 15 to 60 seconds, or can be
+ infinite.
+
+ :param lease_id: Specifies the current lease ID on the resource.
+ :type lease_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ comp = "lease"
+ restype = "container"
+ action = "renew"
+
+ # Construct URL
+ url = self.renew_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ renew_lease.metadata = {'url': '/{containerName}'}
+
+ def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
+ """[Update] establishes and manages a lock on a container for delete
+ operations. The lock duration can be 15 to 60 seconds, or can be
+ infinite.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param break_period: For a break operation, proposed duration the
+ lease should continue before it is broken, in seconds, between 0 and
+ 60. This break period is only used if it is shorter than the time
+ remaining on the lease. If longer, the time remaining on the lease is
+ used. A new lease will not be available before the break period has
+ expired, but the lease may be held for longer than the break period.
+ If this header does not appear with a break operation, a
+ fixed-duration lease breaks after the remaining lease period elapses,
+ and an infinite lease breaks immediately.
+ :type break_period: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ comp = "lease"
+ restype = "container"
+ action = "break"
+
+ # Construct URL
+ url = self.break_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if break_period is not None:
+ header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ break_lease.metadata = {'url': '/{containerName}'}
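+
+    # Usage sketch (illustrative only, not part of the generated code):
+    # breaking an active lease while giving the current holder up to 10 more
+    # seconds, then reading the remaining lease time from the response
+    # headers:
+    #
+    #     headers = container.break_lease(
+    #         break_period=10,
+    #         cls=lambda response, body, hdrs: hdrs,
+    #     )
+    #     remaining_seconds = headers["x-ms-lease-time"]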
+
+ def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
+ """[Update] establishes and manages a lock on a container for delete
+ operations. The lock duration can be 15 to 60 seconds, or can be
+ infinite.
+
+ :param lease_id: Specifies the current lease ID on the resource.
+ :type lease_id: str
+ :param proposed_lease_id: Proposed lease ID, in a GUID string format.
+ The Blob service returns 400 (Invalid request) if the proposed lease
+ ID is not in the correct format. See Guid Constructor (String) for a
+ list of valid GUID string formats.
+ :type proposed_lease_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ comp = "lease"
+ restype = "container"
+ action = "change"
+
+ # Construct URL
+ url = self.change_lease.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ change_lease.metadata = {'url': '/{containerName}'}
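+
+    # Usage sketch (illustrative only, not part of the generated code):
+    # swapping an active lease to a caller-chosen GUID; the current lease ID
+    # below is a placeholder for a value obtained from a prior acquire call:
+    #
+    #     import uuid
+    #     container.change_lease(
+    #         lease_id="<current-lease-id>",
+    #         proposed_lease_id=str(uuid.uuid4()),
+    #     )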
+
+ def list_blob_flat_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs):
+ """[Update] The List Blobs operation returns a list of the blobs under the
+ specified container.
+
+        :param prefix: Filters the results to return only blobs whose
+         name begins with the specified prefix.
+        :type prefix: str
+        :param marker: A string value that identifies the portion of the list
+         of blobs to be returned with the next listing operation. The
+         operation returns the NextMarker value within the response body if
+         the listing operation did not return all blobs remaining to be
+         listed with the current page. The NextMarker value can be used as
+         the value for the marker parameter in a subsequent call to request
+         the next page of list items. The marker value is opaque to the
+         client.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of blobs to return.
+         If the request does not specify maxresults, or specifies a value
+         greater than 5000, the server will return up to 5000 items. Note
+         that if the listing operation crosses a partition boundary, then the
+         service will return a continuation token for retrieving the
+         remainder of the results. For this reason, it is possible that the
+         service will return fewer results than specified by maxresults, or
+         than the default of 5000.
+        :type maxresults: int
+ :param include: Include this parameter to specify one or more datasets
+ to include in the response.
+ :type include: list[str or
+ ~azure.storage.blob.models.ListBlobsIncludeItem]
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: ListBlobsFlatSegmentResponse or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "container"
+ comp = "list"
+
+ # Construct URL
+ url = self.list_blob_flat_segment.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if prefix is not None:
+ query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if maxresults is not None:
+ query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+ if include is not None:
+ query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('ListBlobsFlatSegmentResponse', response)
+ header_dict = {
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ list_blob_flat_segment.metadata = {'url': '/{containerName}'}
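+
+    # Usage sketch (illustrative only, not part of the generated code):
+    # paging through all blobs whose names start with "logs/" by feeding the
+    # NextMarker of each response into the next call; this assumes the
+    # generated ListBlobsFlatSegmentResponse model exposes `segment.blob_items`
+    # and `next_marker` as in the standard generation:
+    #
+    #     marker = None
+    #     while True:
+    #         page = container.list_blob_flat_segment(
+    #             prefix="logs/", marker=marker, maxresults=1000)
+    #         for blob in page.segment.blob_items:
+    #             print(blob.name)
+    #         marker = page.next_marker
+    #         if not marker:
+    #             break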
+
+ def list_blob_hierarchy_segment(self, delimiter, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs):
+ """[Update] The List Blobs operation returns a list of the blobs under the
+ specified container.
+
+ :param delimiter: When the request includes this parameter, the
+ operation returns a BlobPrefix element in the response body that acts
+ as a placeholder for all blobs whose names begin with the same
+ substring up to the appearance of the delimiter character. The
+ delimiter may be a single character or a string.
+ :type delimiter: str
+        :param prefix: Filters the results to return only blobs whose
+         name begins with the specified prefix.
+        :type prefix: str
+        :param marker: A string value that identifies the portion of the list
+         of blobs to be returned with the next listing operation. The
+         operation returns the NextMarker value within the response body if
+         the listing operation did not return all blobs remaining to be
+         listed with the current page. The NextMarker value can be used as
+         the value for the marker parameter in a subsequent call to request
+         the next page of list items. The marker value is opaque to the
+         client.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of blobs to return.
+         If the request does not specify maxresults, or specifies a value
+         greater than 5000, the server will return up to 5000 items. Note
+         that if the listing operation crosses a partition boundary, then the
+         service will return a continuation token for retrieving the
+         remainder of the results. For this reason, it is possible that the
+         service will return fewer results than specified by maxresults, or
+         than the default of 5000.
+        :type maxresults: int
+ :param include: Include this parameter to specify one or more datasets
+ to include in the response.
+ :type include: list[str or
+ ~azure.storage.blob.models.ListBlobsIncludeItem]
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: ListBlobsHierarchySegmentResponse or the result of
+ cls(response)
+ :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "container"
+ comp = "list"
+
+ # Construct URL
+ url = self.list_blob_hierarchy_segment.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if prefix is not None:
+ query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+ query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str')
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if maxresults is not None:
+ query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+ if include is not None:
+ query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', response)
+ header_dict = {
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'}
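+
+    # Usage sketch (illustrative only, not part of the generated code):
+    # listing one "directory level" at a time by using "/" as the delimiter;
+    # names sharing a prefix up to the delimiter come back as BlobPrefix
+    # entries instead of individual blobs (attribute names assume the
+    # standard generated ListBlobsHierarchySegmentResponse model):
+    #
+    #     page = container.list_blob_hierarchy_segment(delimiter="/", prefix="logs/")
+    #     for sub_prefix in page.segment.blob_prefixes or []:
+    #         print("virtual directory:", sub_prefix.name)
+    #     for blob in page.segment.blob_items or []:
+    #         print("blob:", blob.name)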
+
+ def get_account_info(self, cls=None, **kwargs):
+ """Returns the sku name and account kind .
+
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "account"
+ comp = "properties"
+
+ # Construct URL
+ url = self.get_account_info.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')),
+ 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ get_account_info.metadata = {'url': '/{containerName}'}
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_directory_operations.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_directory_operations.py
new file mode 100644
index 00000000000..c2bf3178b97
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_directory_operations.py
@@ -0,0 +1,739 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class DirectoryOperations(object):
+ """DirectoryOperations operations.
+
+    You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+    :ivar resource: Constant value: "directory".
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer):
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+ self.resource = "directory"
+
+ def create(self, timeout=None, directory_properties=None, posix_permissions=None, posix_umask=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """Create a directory. By default, the destination is overwritten and if
+ the destination already exists and has a lease the lease is broken.
+ This operation supports conditional HTTP requests. For more
+ information, see [Specifying Conditional Headers for Blob Service
+ Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+ To fail if the destination already exists, use a conditional request
+ with If-None-Match: "*".
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param directory_properties: Optional. User-defined properties to be
+ stored with the file or directory, in the format of a comma-separated
+ list of name and value pairs "n1=v1, n2=v2, ...", where each value is
+ base64 encoded.
+ :type directory_properties: str
+ :param posix_permissions: Optional and only valid if Hierarchical
+ Namespace is enabled for the account. Sets POSIX access permissions
+ for the file owner, the file owning group, and others. Each class may
+ be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
+ 0766) are supported.
+ :type posix_permissions: str
+        :param posix_umask: Only valid if Hierarchical Namespace is enabled
+         for the account. This umask restricts permission settings for the
+         file or directory, and is only applied when a default ACL does not
+         exist in the parent directory. If a umask bit is set, the
+         corresponding permission is disabled; otherwise, the corresponding
+         permission is determined by the permission setting. A 4-digit octal
+         notation (e.g. 0022) is supported here. If no umask is specified, a
+         default umask of 0027 will be used.
+ :type posix_umask: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param directory_http_headers: Additional parameters for the operation
+ :type directory_http_headers:
+ ~azure.storage.blob.models.DirectoryHttpHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`DataLakeStorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ cache_control = None
+ if directory_http_headers is not None:
+ cache_control = directory_http_headers.cache_control
+ content_type = None
+ if directory_http_headers is not None:
+ content_type = directory_http_headers.content_type
+ content_encoding = None
+ if directory_http_headers is not None:
+ content_encoding = directory_http_headers.content_encoding
+ content_language = None
+ if directory_http_headers is not None:
+ content_language = directory_http_headers.content_language
+ content_disposition = None
+ if directory_http_headers is not None:
+ content_disposition = directory_http_headers.content_disposition
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+
+ # Construct URL
+ url = self.create.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if directory_properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str')
+ if posix_permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
+ if posix_umask is not None:
+ header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if cache_control is not None:
+ header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
+ if content_type is not None:
+ header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
+ if content_encoding is not None:
+ header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
+ if content_language is not None:
+ header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
+ if content_disposition is not None:
+ header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ }
+ return cls(response, None, response_headers)
+ create.metadata = {'url': '/{filesystem}/{path}'}
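+
+    # Usage sketch (illustrative only, not part of the generated code):
+    # creating a directory that must not already exist (via the
+    # If-None-Match: "*" conditional mentioned above) with explicit POSIX
+    # permissions and umask; `directory` is assumed to be an instance of this
+    # operations class:
+    #
+    #     conditions = models.ModifiedAccessConditions(if_none_match="*")
+    #     directory.create(
+    #         posix_permissions="rwxr-x---",
+    #         posix_umask="0027",
+    #         modified_access_conditions=conditions,
+    #     )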
+
+ def rename(self, rename_source, timeout=None, marker=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs):
+ """Rename a directory. By default, the destination is overwritten and if
+ the destination already exists and has a lease the lease is broken.
+ This operation supports conditional HTTP requests. For more
+ information, see [Specifying Conditional Headers for Blob Service
+ Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations).
+ To fail if the destination already exists, use a conditional request
+ with If-None-Match: "*".
+
+ :param rename_source: The file or directory to be renamed. The value
+         must have the following format: "/{filesystem}/{path}". If
+ "x-ms-properties" is specified, the properties will overwrite the
+ existing properties; otherwise, the existing properties will be
+ preserved.
+ :type rename_source: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param marker: When renaming a directory, the number of paths that are
+ renamed with each invocation is limited. If the number of paths to be
+ renamed exceeds this limit, a continuation token is returned in this
+ response header. When a continuation token is returned in the
+ response, it must be specified in a subsequent invocation of the
+ rename operation to continue renaming the directory.
+ :type marker: str
+ :param path_rename_mode: Determines the behavior of the rename
+ operation. Possible values include: 'legacy', 'posix'
+ :type path_rename_mode: str or
+ ~azure.storage.blob.models.PathRenameMode
+ :param directory_properties: Optional. User-defined properties to be
+ stored with the file or directory, in the format of a comma-separated
+ list of name and value pairs "n1=v1, n2=v2, ...", where each value is
+ base64 encoded.
+ :type directory_properties: str
+ :param posix_permissions: Optional and only valid if Hierarchical
+ Namespace is enabled for the account. Sets POSIX access permissions
+ for the file owner, the file owning group, and others. Each class may
+ be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
+ 0766) are supported.
+ :type posix_permissions: str
+        :param posix_umask: Only valid if Hierarchical Namespace is enabled
+         for the account. This umask restricts permission settings for the
+         file or directory, and is only applied when a default ACL does not
+         exist in the parent directory. If a umask bit is set, the
+         corresponding permission is disabled; otherwise, the corresponding
+         permission is determined by the permission setting. A 4-digit octal
+         notation (e.g. 0022) is supported here. If no umask is specified, a
+         default umask of 0027 will be used.
+ :type posix_umask: str
+ :param source_lease_id: A lease ID for the source path. If specified,
+ the source path must have an active lease and the lease ID must match.
+ :type source_lease_id: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param directory_http_headers: Additional parameters for the operation
+ :type directory_http_headers:
+ ~azure.storage.blob.models.DirectoryHttpHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param source_modified_access_conditions: Additional parameters for
+ the operation
+ :type source_modified_access_conditions:
+ ~azure.storage.blob.models.SourceModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`DataLakeStorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ cache_control = None
+ if directory_http_headers is not None:
+ cache_control = directory_http_headers.cache_control
+ content_type = None
+ if directory_http_headers is not None:
+ content_type = directory_http_headers.content_type
+ content_encoding = None
+ if directory_http_headers is not None:
+ content_encoding = directory_http_headers.content_encoding
+ content_language = None
+ if directory_http_headers is not None:
+ content_language = directory_http_headers.content_language
+ content_disposition = None
+ if directory_http_headers is not None:
+ content_disposition = directory_http_headers.content_disposition
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ source_if_modified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ source_if_unmodified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ source_if_match = None
+ if source_modified_access_conditions is not None:
+ source_if_match = source_modified_access_conditions.source_if_match
+ source_if_none_match = None
+ if source_modified_access_conditions is not None:
+ source_if_none_match = source_modified_access_conditions.source_if_none_match
+
+ # Construct URL
+ url = self.rename.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if marker is not None:
+ query_parameters['continuation'] = self._serialize.query("marker", marker, 'str')
+ if path_rename_mode is not None:
+ query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str')
+ if directory_properties is not None:
+ header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str')
+ if posix_permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
+ if posix_umask is not None:
+ header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str')
+ if source_lease_id is not None:
+ header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if cache_control is not None:
+ header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str')
+ if content_type is not None:
+ header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str')
+ if content_encoding is not None:
+ header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str')
+ if content_language is not None:
+ header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str')
+ if content_disposition is not None:
+ header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
+ if source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
+ if source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
+ if source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ }
+ return cls(response, None, response_headers)
+ rename.metadata = {'url': '/{filesystem}/{path}'}
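+ # Illustrative sketch (not part of the generated code): how a caller might pass
+ # conditional source headers to rename. `directory_ops` is a hypothetical instance
+ # of this operations class obtained from the generated client, and the vendored
+ # models module is assumed to expose SourceModifiedAccessConditions as referenced
+ # in the docstring above. Values are placeholders.
+ #   source_conditions = models.SourceModifiedAccessConditions(source_if_match='"0x8DPLACEHOLDER"')
+ #   directory_ops.rename(rename_source="/myfilesystem/sourcedir",
+ #                        source_modified_access_conditions=source_conditions)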
+
+ def delete(self, recursive_directory_delete, timeout=None, marker=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """Deletes the directory.
+
+ :param recursive_directory_delete: If "true", all paths beneath the
+ directory will be deleted. If "false" and the directory is non-empty,
+ an error occurs.
+ :type recursive_directory_delete: bool
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param marker: When deleting a directory, the number of paths that are
+ deleted with each invocation is limited. If the number of paths to be
+ deleted exceeds this limit, a continuation token is returned in this
+ response header. When a continuation token is returned in the
+ response, it must be specified in a subsequent invocation of the
+ delete operation to continue deleting the directory.
+ :type marker: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`DataLakeStorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+
+ # Construct URL
+ url = self.delete.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool')
+ if marker is not None:
+ query_parameters['continuation'] = self._serialize.query("marker", marker, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+
+ # Construct and send request
+ request = self._client.delete(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ }
+ return cls(response, None, response_headers)
+ delete.metadata = {'url': '/{filesystem}/{path}'}
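+ # Illustrative sketch (not part of the generated code): recursively deleting a
+ # directory while holding a lease. `directory_ops` is a hypothetical instance of
+ # this class; LeaseAccessConditions is the model referenced in the docstring above.
+ #   lease = models.LeaseAccessConditions(lease_id="00000000-0000-0000-0000-000000000000")
+ #   directory_ops.delete(recursive_directory_delete=True,
+ #                        lease_access_conditions=lease)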
+
+ def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """Set the owner, group, permissions, or access control list for a
+ directory.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param owner: Optional. The owner of the blob or directory.
+ :type owner: str
+ :param group: Optional. The owning group of the blob or directory.
+ :type group: str
+ :param posix_permissions: Optional and only valid if Hierarchical
+ Namespace is enabled for the account. Sets POSIX access permissions
+ for the file owner, the file owning group, and others. Each class may
+ be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g.
+ 0766) are supported.
+ :type posix_permissions: str
+ :param posix_acl: Sets POSIX access control rights on files and
+ directories. The value is a comma-separated list of access control
+ entries. Each access control entry (ACE) consists of a scope, a type,
+ a user or group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type posix_acl: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`DataLakeStorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ action = "setAccessControl"
+
+ # Construct URL
+ url = self.set_access_control.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if owner is not None:
+ header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str')
+ if group is not None:
+ header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str')
+ if posix_permissions is not None:
+ header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
+ if posix_acl is not None:
+ header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.patch(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ }
+ return cls(response, None, response_headers)
+ set_access_control.metadata = {'url': '/{filesystem}/{path}'}
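+ # Illustrative sketch (not part of the generated code): setting the owner and
+ # POSIX permissions on a path. `directory_ops` is a hypothetical instance of this
+ # class; the owner object ID and permission string are placeholders.
+ #   directory_ops.set_access_control(owner="<owner-object-id>",
+ #                                    posix_permissions="rwxr-x---")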
+
+ def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """Get the owner, group, permissions, or access control list for a
+ directory.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param upn: Optional. Valid only when Hierarchical Namespace is
+ enabled for the account. If "true", the identity values returned in
+ the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+ transformed from Azure Active Directory Object IDs to User Principal
+ Names. If "false", the values will be returned as Azure Active
+ Directory Object IDs. The default value is false.
+ :type upn: bool
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`DataLakeStorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+
+ action = "getAccessControl"
+
+ # Construct URL
+ url = self.get_access_control.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if upn is not None:
+ query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
+ query_parameters['action'] = self._serialize.query("action", action, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+
+ # Construct and send request
+ request = self._client.head(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.DataLakeStorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')),
+ 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')),
+ 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')),
+ 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ }
+ return cls(response, None, response_headers)
+ get_access_control.metadata = {'url': '/{filesystem}/{path}'}
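+ # Illustrative sketch (not part of the generated code): reading back the ACL with
+ # UPN translation and extracting it from the response headers via the `cls`
+ # callback, which this method invokes as cls(response, None, response_headers).
+ # `directory_ops` is a hypothetical instance of this class.
+ #   def pick_acl(response, deserialized, headers):
+ #       return headers.get('x-ms-acl')
+ #   acl = directory_ops.get_access_control(upn=True, cls=pick_acl)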
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_page_blob_operations.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_page_blob_operations.py
new file mode 100644
index 00000000000..fedc96c2151
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_page_blob_operations.py
@@ -0,0 +1,1399 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class PageBlobOperations(object):
+ """PageBlobOperations operations.
+
+ You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "PageBlob".
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer):
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+ self.x_ms_blob_type = "PageBlob"
+
+ def create(self, content_length, blob_content_length, timeout=None, tier=None, metadata=None, blob_sequence_number=0, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Create operation creates a new page blob.
+
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param blob_content_length: This header specifies the maximum size for
+ the page blob, up to 1 TB. The page blob size must be aligned to a
+ 512-byte boundary.
+ :type blob_content_length: long
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param tier: Optional. Indicates the tier to be set on the page blob.
+ Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30',
+ 'P40', 'P50', 'P60', 'P70', 'P80'
+ :type tier: str or
+ ~azure.storage.blob.models.PremiumPageBlobAccessTier
+ :param metadata: Optional. Specifies a user-defined name-value pair
+ associated with the blob. If no name-value pairs are specified, the
+ operation will copy the metadata from the source blob or file to the
+ destination blob. If one or more name-value pairs are specified, the
+ destination blob is created with the specified metadata, and metadata
+ is not copied from the source blob or file. Note that beginning with
+ version 2009-09-19, metadata names must adhere to the naming rules for
+ C# identifiers. See Naming and Referencing Containers, Blobs, and
+ Metadata for more information.
+ :type metadata: str
+ :param blob_sequence_number: Set for page blobs only. The sequence
+ number is a user-controlled value that you can use to track requests.
+ The value of the sequence number must be between 0 and 2^63 - 1.
+ :type blob_sequence_number: long
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param blob_tags_string: Optional. Used to set blob tags in various
+ blob operations.
+ :type blob_tags_string: str
+ :param blob_http_headers: Additional parameters for the operation
+ :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ blob_content_type = None
+ if blob_http_headers is not None:
+ blob_content_type = blob_http_headers.blob_content_type
+ blob_content_encoding = None
+ if blob_http_headers is not None:
+ blob_content_encoding = blob_http_headers.blob_content_encoding
+ blob_content_language = None
+ if blob_http_headers is not None:
+ blob_content_language = blob_http_headers.blob_content_language
+ blob_content_md5 = None
+ if blob_http_headers is not None:
+ blob_content_md5 = blob_http_headers.blob_content_md5
+ blob_cache_control = None
+ if blob_http_headers is not None:
+ blob_cache_control = blob_http_headers.blob_cache_control
+ blob_content_disposition = None
+ if blob_http_headers is not None:
+ blob_content_disposition = blob_http_headers.blob_content_disposition
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ # Construct URL
+ url = self.create.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if tier is not None:
+ header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str')
+ if metadata is not None:
+ header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+ header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long')
+ if blob_sequence_number is not None:
+ header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if blob_tags_string is not None:
+ header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str')
+ header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str')
+ if blob_content_type is not None:
+ header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str')
+ if blob_content_encoding is not None:
+ header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str')
+ if blob_content_language is not None:
+ header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str')
+ if blob_content_md5 is not None:
+ header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray')
+ if blob_cache_control is not None:
+ header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str')
+ if blob_content_disposition is not None:
+ header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ create.metadata = {'url': '/{containerName}/{blob}'}
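+ # Illustrative sketch (not part of the generated code): creating an empty 1 MiB
+ # page blob. `page_blob_ops` is a hypothetical instance of this class obtained
+ # from the generated client; page blob sizes must be 512-byte aligned.
+ #   page_blob_ops.create(content_length=0,
+ #                        blob_content_length=1024 * 1024,
+ #                        blob_sequence_number=0)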
+
+ def upload_pages(self, body, content_length, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Upload Pages operation writes a range of pages to a page blob.
+
+ :param body: Initial data
+ :type body: Generator
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param transactional_content_md5: Specify the transactional md5 for
+ the body, to be validated by the service.
+ :type transactional_content_md5: bytearray
+ :param transactional_content_crc64: Specify the transactional crc64
+ for the body, to be validated by the service.
+ :type transactional_content_crc64: bytearray
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param range: Return only the bytes of the blob in the specified
+ range.
+ :type range: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param sequence_number_access_conditions: Additional parameters for
+ the operation
+ :type sequence_number_access_conditions:
+ ~azure.storage.blob.models.SequenceNumberAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_sequence_number_less_than_or_equal_to = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
+ if_sequence_number_less_than = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
+ if_sequence_number_equal_to = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "page"
+ page_write = "update"
+
+ # Construct URL
+ url = self.upload_pages.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/octet-stream'
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if transactional_content_md5 is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
+ if transactional_content_crc64 is not None:
+ header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray')
+ if range is not None:
+ header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_sequence_number_less_than_or_equal_to is not None:
+ header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long')
+ if if_sequence_number_less_than is not None:
+ header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long')
+ if if_sequence_number_equal_to is not None:
+ header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct body
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, stream_content=body)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ upload_pages.metadata = {'url': '/{containerName}/{blob}'}
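+ # Illustrative sketch (not part of the generated code): writing the first
+ # 512-byte page. `page_blob_ops` is a hypothetical instance of this class; the
+ # body is sent as stream content, so an iterator of bytes is passed here.
+ #   data = b"\x00" * 512
+ #   page_blob_ops.upload_pages(body=iter([data]),
+ #                              content_length=512,
+ #                              range="bytes=0-511")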
+
+ def clear_pages(self, content_length, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Clear Pages operation clears a set of pages from a page blob.
+
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param range: Return only the bytes of the blob in the specified
+ range.
+ :type range: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param sequence_number_access_conditions: Additional parameters for
+ the operation
+ :type sequence_number_access_conditions:
+ ~azure.storage.blob.models.SequenceNumberAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_sequence_number_less_than_or_equal_to = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
+ if_sequence_number_less_than = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
+ if_sequence_number_equal_to = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "page"
+ page_write = "clear"
+
+ # Construct URL
+ url = self.clear_pages.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if range is not None:
+ header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_sequence_number_less_than_or_equal_to is not None:
+ header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long')
+ if if_sequence_number_less_than is not None:
+ header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long')
+ if if_sequence_number_equal_to is not None:
+ header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ clear_pages.metadata = {'url': '/{containerName}/{blob}'}
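+ # Illustrative sketch (not part of the generated code): clearing the same
+ # 512-byte page. `page_blob_ops` is a hypothetical instance of this class; a
+ # clear request carries no body, so content_length is assumed to be 0 here.
+ #   page_blob_ops.clear_pages(content_length=0, range="bytes=0-511")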
+
+ def upload_pages_from_url(self, source_url, source_range, content_length, range, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, sequence_number_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs):
+ """The Upload Pages operation writes a range of pages to a page blob where
+ the contents are read from a URL.
+
+ :param source_url: Specify a URL to the copy source.
+ :type source_url: str
+ :param source_range: Bytes of source data in the specified range. The
+ length of this range should match the ContentLength header and
+ x-ms-range/Range destination range header.
+ :type source_range: str
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param range: The range of bytes to which the source range would be
+ written. The range should be 512 aligned and range-end is required.
+ :type range: str
+ :param source_content_md5: Specify the md5 calculated for the range of
+ bytes that must be read from the copy source.
+ :type source_content_md5: bytearray
+ :param source_contentcrc64: Specify the crc64 calculated for the range
+ of bytes that must be read from the copy source.
+ :type source_contentcrc64: bytearray
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param sequence_number_access_conditions: Additional parameters for
+ the operation
+ :type sequence_number_access_conditions:
+ ~azure.storage.blob.models.SequenceNumberAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param source_modified_access_conditions: Additional parameters for
+ the operation
+ :type source_modified_access_conditions:
+ ~azure.storage.blob.models.SourceModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_sequence_number_less_than_or_equal_to = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
+ if_sequence_number_less_than = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than
+ if_sequence_number_equal_to = None
+ if sequence_number_access_conditions is not None:
+ if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+ source_if_modified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ source_if_unmodified_since = None
+ if source_modified_access_conditions is not None:
+ source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ source_if_match = None
+ if source_modified_access_conditions is not None:
+ source_if_match = source_modified_access_conditions.source_if_match
+ source_if_none_match = None
+ if source_modified_access_conditions is not None:
+ source_if_none_match = source_modified_access_conditions.source_if_none_match
+
+ comp = "page"
+ page_write = "update"
+
+ # Construct URL
+ url = self.upload_pages_from_url.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str')
+ header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str')
+ if source_content_md5 is not None:
+ header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray')
+ if source_contentcrc64 is not None:
+ header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray')
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_sequence_number_less_than_or_equal_to is not None:
+ header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long')
+ if if_sequence_number_less_than is not None:
+ header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long')
+ if if_sequence_number_equal_to is not None:
+ header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+ if source_if_modified_since is not None:
+ header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123')
+ if source_if_unmodified_since is not None:
+ header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123')
+ if source_if_match is not None:
+ header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str')
+ if source_if_none_match is not None:
+ header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')),
+ 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')),
+ 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')),
+ 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'}
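+ # Usage sketch (illustrative comment only, not part of the generated code):
+ # with `ops` as an instance of this operations class wired to an authenticated
+ # pipeline, writing one 512-byte page copied from another blob could look like
+ # the following; the source URL and byte ranges are placeholders.
+ #
+ #     ops.upload_pages_from_url(
+ #         source_url='https://source.blob.core.windows.net/c/blob?<sas>',
+ #         source_range='bytes=0-511',
+ #         content_length=0,  # no request body; the page data comes from source_url
+ #         range='bytes=0-511')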
+
+ def get_page_ranges(self, snapshot=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Get Page Ranges operation returns the list of valid page ranges for
+ a page blob or snapshot of a page blob.
+
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param range: Return only the bytes of the blob in the specified
+ range.
+ :type range: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: PageList or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.PageList
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "pagelist"
+
+ # Construct URL
+ url = self.get_page_ranges.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ if range is not None:
+ header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('PageList', response)
+ header_dict = {
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_page_ranges.metadata = {'url': '/{containerName}/{blob}'}
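+ # Usage sketch (illustrative comment only): with `ops` as an instance of this
+ # operations class, listing the valid page ranges of the first 1 MiB:
+ #
+ #     page_list = ops.get_page_ranges(range='bytes=0-1048575', timeout=30)
+ #
+ # The result deserializes to the PageList model referenced in the docstring.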
+
+ def get_page_ranges_diff(self, snapshot=None, timeout=None, prevsnapshot=None, prev_snapshot_url=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Get Page Ranges Diff operation returns the list of valid page
+ ranges for a page blob that were changed between target blob and
+ previous snapshot.
+
+ :param snapshot: The snapshot parameter is an opaque DateTime value
+ that, when present, specifies the blob snapshot to retrieve. For more
+ information on working with blob snapshots, see Creating
+ a Snapshot of a Blob.
+ :type snapshot: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param prevsnapshot: Optional in version 2015-07-08 and newer. The
+ prevsnapshot parameter is a DateTime value that specifies that the
+ response will contain only pages that were changed between target blob
+ and previous snapshot. Changed pages include both updated and cleared
+ pages. The target blob may be a snapshot, as long as the snapshot
+ specified by prevsnapshot is the older of the two. Note that
+ incremental snapshots are currently supported only for blobs created
+ on or after January 1, 2016.
+ :type prevsnapshot: str
+ :param prev_snapshot_url: Optional. This header is only supported in
+ service versions 2019-04-19 and after and specifies the URL of a
+ previous snapshot of the target blob. The response will only contain
+ pages that were changed between the target blob and its previous
+ snapshot.
+ :type prev_snapshot_url: str
+ :param range: Return only the bytes of the blob in the specified
+ range.
+ :type range: str
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: PageList or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.PageList
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "pagelist"
+
+ # Construct URL
+ url = self.get_page_ranges_diff.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if prevsnapshot is not None:
+ query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ if prev_snapshot_url is not None:
+ header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str')
+ if range is not None:
+ header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('PageList', response)
+ header_dict = {
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'}
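+ # Usage sketch (illustrative comment only): to diff the current blob against an
+ # earlier snapshot, pass that snapshot's opaque DateTime value as
+ # `prevsnapshot` (the value below is a placeholder):
+ #
+ #     diff = ops.get_page_ranges_diff(
+ #         prevsnapshot='2021-05-20T00:00:00.0000000Z')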
+
+ def resize(self, blob_content_length, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs):
+ """Resize the Blob.
+
+ :param blob_content_length: This header specifies the maximum size for
+ the page blob, up to 1 TB. The page blob size must be aligned to a
+ 512-byte boundary.
+ :type blob_content_length: long
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Additional parameters for the operation
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Additional parameters for the operation
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ encryption_key = None
+ if cpk_info is not None:
+ encryption_key = cpk_info.encryption_key
+ encryption_key_sha256 = None
+ if cpk_info is not None:
+ encryption_key_sha256 = cpk_info.encryption_key_sha256
+ encryption_algorithm = None
+ if cpk_info is not None:
+ encryption_algorithm = cpk_info.encryption_algorithm
+ encryption_scope = None
+ if cpk_scope_info is not None:
+ encryption_scope = cpk_scope_info.encryption_scope
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "properties"
+
+ # Construct URL
+ url = self.resize.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str')
+ if encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str')
+ if encryption_algorithm is not None:
+ header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType')
+ if encryption_scope is not None:
+ header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ resize.metadata = {'url': '/{containerName}/{blob}'}
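+ # Usage sketch (illustrative comment only): resizing the page blob to 1 GiB,
+ # which satisfies the 512-byte alignment requirement described above:
+ #
+ #     ops.resize(blob_content_length=1024 * 1024 * 1024)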
+
+ def update_sequence_number(self, sequence_number_action, timeout=None, blob_sequence_number=0, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs):
+ """Update the sequence number of the blob.
+
+ :param sequence_number_action: Required if the
+ x-ms-blob-sequence-number header is set for the request. This property
+ applies to page blobs only. This property indicates how the service
+ should modify the blob's sequence number. Possible values include:
+ 'max', 'update', 'increment'
+ :type sequence_number_action: str or
+ ~azure.storage.blob.models.SequenceNumberActionType
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param blob_sequence_number: Set for page blobs only. The sequence
+ number is a user-controlled value that you can use to track requests.
+ The value of the sequence number must be between 0 and 2^63 - 1.
+ :type blob_sequence_number: long
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param lease_access_conditions: Additional parameters for the
+ operation
+ :type lease_access_conditions:
+ ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ lease_id = None
+ if lease_access_conditions is not None:
+ lease_id = lease_access_conditions.lease_id
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "properties"
+
+ # Construct URL
+ url = self.update_sequence_number.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'SequenceNumberActionType')
+ if blob_sequence_number is not None:
+ header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ update_sequence_number.metadata = {'url': '/{containerName}/{blob}'}
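+ # Usage sketch (illustrative comment only): setting an explicit sequence number
+ # with the 'update' action; 'max' and 'increment' are the other actions listed
+ # in the docstring above.
+ #
+ #     ops.update_sequence_number(sequence_number_action='update',
+ #                                blob_sequence_number=7)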
+
+ def copy_incremental(self, copy_source, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs):
+ """The Copy Incremental operation copies a snapshot of the source page
+ blob to a destination page blob. The snapshot is copied such that only
+ the differential changes between the previously copied snapshot are
+ transferred to the destination. The copied snapshots are complete
+ copies of the original snapshot and can be read or copied from as
+ usual. This API is supported since REST version 2016-05-31.
+
+ :param copy_source: Specifies the name of the source page blob
+ snapshot. This value is a URL of up to 2 KB in length that specifies a
+ page blob snapshot. The value should be URL-encoded as it would appear
+ in a request URI. The source blob must either be public or must be
+ authenticated via a shared access signature.
+ :type copy_source: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param modified_access_conditions: Additional parameters for the
+ operation
+ :type modified_access_conditions:
+ ~azure.storage.blob.models.ModifiedAccessConditions
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ if_modified_since = None
+ if modified_access_conditions is not None:
+ if_modified_since = modified_access_conditions.if_modified_since
+ if_unmodified_since = None
+ if modified_access_conditions is not None:
+ if_unmodified_since = modified_access_conditions.if_unmodified_since
+ if_match = None
+ if modified_access_conditions is not None:
+ if_match = modified_access_conditions.if_match
+ if_none_match = None
+ if modified_access_conditions is not None:
+ if_none_match = modified_access_conditions.if_none_match
+ if_tags = None
+ if modified_access_conditions is not None:
+ if_tags = modified_access_conditions.if_tags
+
+ comp = "incrementalcopy"
+
+ # Construct URL
+ url = self.copy_incremental.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+ if if_modified_since is not None:
+ header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
+ if if_unmodified_since is not None:
+ header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
+ if if_match is not None:
+ header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+ if if_none_match is not None:
+ header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
+ if if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'ETag': self._deserialize('str', response.headers.get('ETag')),
+ 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')),
+ 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ copy_incremental.metadata = {'url': '/{containerName}/{blob}'}
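+ # Usage sketch (illustrative comment only): `copy_source` must point at a
+ # specific snapshot of the source page blob (the URL below is a placeholder):
+ #
+ #     ops.copy_incremental(
+ #         copy_source='https://source.blob.core.windows.net/c/blob'
+ #                     '?snapshot=2021-05-20T00:00:00.0000000Z&<sas>')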
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_service_operations.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_service_operations.py
new file mode 100644
index 00000000000..0a49915e1dd
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/operations/_service_operations.py
@@ -0,0 +1,663 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.core.exceptions import map_error
+
+from .. import models
+
+
+class ServiceOperations(object):
+ """ServiceOperations operations.
+
+ You should not instantiate this class directly, but create a Client instance that will create it for you and attach it as an attribute.
+
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer):
+
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+
+ self._config = config
+
+ def set_properties(self, storage_service_properties, timeout=None, request_id=None, cls=None, **kwargs):
+ """Sets properties for a storage account's Blob service endpoint,
+ including properties for Storage Analytics and CORS (Cross-Origin
+ Resource Sharing) rules.
+
+ :param storage_service_properties: The StorageService properties.
+ :type storage_service_properties:
+ ~azure.storage.blob.models.StorageServiceProperties
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "service"
+ comp = "properties"
+
+ # Construct URL
+ url = self.set_properties.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct body
+ body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties')
+
+ # Construct and send request
+ request = self._client.put(url, query_parameters, header_parameters, body_content)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ set_properties.metadata = {'url': '/'}
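+ # Usage sketch (illustrative comment only): turning on blob soft delete via the
+ # service properties. The RetentionPolicy model and `delete_retention_policy`
+ # field names are assumptions about the vendored models package, not shown in
+ # this diff.
+ #
+ #     props = models.StorageServiceProperties(
+ #         delete_retention_policy=models.RetentionPolicy(enabled=True, days=7))
+ #     service_ops.set_properties(props)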
+
+ def get_properties(self, timeout=None, request_id=None, cls=None, **kwargs):
+ """gets the properties of a storage account's Blob service, including
+ properties for Storage Analytics and CORS (Cross-Origin Resource
+ Sharing) rules.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: StorageServiceProperties or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.StorageServiceProperties
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "service"
+ comp = "properties"
+
+ # Construct URL
+ url = self.get_properties.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('StorageServiceProperties', response)
+ header_dict = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_properties.metadata = {'url': '/'}
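+ # Usage sketch (illustrative comment only): with `service_ops` as an instance
+ # of ServiceOperations, the current settings come back as a
+ # StorageServiceProperties model:
+ #
+ #     props = service_ops.get_properties(timeout=30)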
+
+ def get_statistics(self, timeout=None, request_id=None, cls=None, **kwargs):
+ """Retrieves statistics related to replication for the Blob service. It is
+ only available on the secondary location endpoint when read-access
+ geo-redundant replication is enabled for the storage account.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: StorageServiceStats or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.StorageServiceStats
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "service"
+ comp = "stats"
+
+ # Construct URL
+ url = self.get_statistics.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('StorageServiceStats', response)
+ header_dict = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_statistics.metadata = {'url': '/'}
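+ # Usage sketch (illustrative comment only): only available when the client
+ # targets the secondary location endpoint of a read-access geo-redundant
+ # (RA-GRS) account, per the docstring above:
+ #
+ #     stats = service_ops.get_statistics()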
+
+ def list_containers_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs):
+ """The List Containers Segment operation returns a list of the containers
+ under the specified account.
+
+ :param prefix: Filters the results to return only containers whose
+ name begins with the specified prefix.
+ :type prefix: str
+ :param marker: A string value that identifies the portion of the list
+ of containers to be returned with the next listing operation. The
+ operation returns the NextMarker value within the response body if the
+ listing operation did not return all containers remaining to be listed
+ with the current page. The NextMarker value can be used as the value
+ for the marker parameter in a subsequent call to request the next page
+ of list items. The marker value is opaque to the client.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of containers to
+ return. If the request does not specify maxresults, or specifies a
+ value greater than 5000, the server will return up to 5000 items. Note
+ that if the listing operation crosses a partition boundary, then the
+ service will return a continuation token for retrieving the remainder
+ of the results. For this reason, it is possible that the service will
+ return fewer results than specified by maxresults, or than the default
+ of 5000.
+ :type maxresults: int
+ :param include: Include this parameter to specify that the container's
+ metadata be returned as part of the response body.
+ :type include: list[str or
+ ~azure.storage.blob.models.ListContainersIncludeType]
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: ListContainersSegmentResponse or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "list"
+
+ # Construct URL
+ url = self.list_containers_segment.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if prefix is not None:
+ query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if maxresults is not None:
+ query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+ if include is not None:
+ query_parameters['include'] = self._serialize.query("include", include, '[ListContainersIncludeType]', div=',')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('ListContainersSegmentResponse', response)
+ header_dict = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ list_containers_segment.metadata = {'url': '/'}
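+ # Usage sketch (illustrative comment only): paging through containers whose
+ # names start with 'logs'. The `next_marker` attribute on the returned
+ # ListContainersSegmentResponse is an assumption about the generated model.
+ #
+ #     segment = service_ops.list_containers_segment(prefix='logs', maxresults=100)
+ #     next_page = service_ops.list_containers_segment(
+ #         prefix='logs', maxresults=100, marker=segment.next_marker)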
+
+ def get_user_delegation_key(self, key_info, timeout=None, request_id=None, cls=None, **kwargs):
+ """Retrieves a user delegation key for the Blob service. This is only a
+ valid operation when using bearer token authentication.
+
+ :param key_info:
+ :type key_info: ~azure.storage.blob.models.KeyInfo
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: UserDelegationKey or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.UserDelegationKey
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "service"
+ comp = "userdelegationkey"
+
+ # Construct URL
+ url = self.get_user_delegation_key.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct body
+ body_content = self._serialize.body(key_info, 'KeyInfo')
+
+ # Construct and send request
+ request = self._client.post(url, query_parameters, header_parameters, body_content)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('UserDelegationKey', response)
+ header_dict = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ get_user_delegation_key.metadata = {'url': '/'}
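+ # Usage sketch (illustrative comment only): requires bearer-token (AAD)
+ # authentication. A KeyInfo model carrying ISO-8601 `start`/`expiry` values is
+ # an assumption about the vendored models package.
+ #
+ #     key_info = models.KeyInfo(start='2021-05-20T00:00:00Z',
+ #                               expiry='2021-05-21T00:00:00Z')
+ #     udk = service_ops.get_user_delegation_key(key_info)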
+
+ def get_account_info(self, cls=None, **kwargs):
+ """Returns the sku name and account kind .
+
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: None or the result of cls(response)
+ :rtype: None
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ restype = "account"
+ comp = "properties"
+
+ # Construct URL
+ url = self.get_account_info.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ if cls:
+ response_headers = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')),
+ 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+ return cls(response, None, response_headers)
+ get_account_info.metadata = {'url': '/'}
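+ # Usage sketch (illustrative comment only): the SKU name and account kind are
+ # returned only as response headers, so a `cls` callback is the natural way to
+ # surface them:
+ #
+ #     headers = service_ops.get_account_info(
+ #         cls=lambda response, deserialized, response_headers: response_headers)
+ #     # headers['x-ms-sku-name'], headers['x-ms-account-kind']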
+
+ def submit_batch(self, body, content_length, multipart_content_type, timeout=None, request_id=None, cls=None, **kwargs):
+ """The Batch operation allows multiple API calls to be embedded into a
+ single HTTP request.
+
+ :param body: Initial data
+ :type body: Generator
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param multipart_content_type: Required. The value of this header must
+ be multipart/mixed with a batch boundary. Example header value:
+ multipart/mixed; boundary=batch_
+ :type multipart_content_type: str
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: object or the result of cls(response)
+ :rtype: Generator
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "batch"
+
+ # Construct URL
+ url = self.submit_batch.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['Content-Type'] = 'application/xml; charset=utf-8'
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct body
+
+ # Construct and send request
+ request = self._client.post(url, query_parameters, header_parameters, stream_content=body)
+ pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = response.stream_download(self._client._pipeline)
+ header_dict = {
+ 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ submit_batch.metadata = {'url': '/'}
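+ # Usage sketch (illustrative comment only): `batch_payload` is assumed to be a
+ # pre-encoded multipart/mixed request body (bytes); the boundary below is a
+ # placeholder. The raw multipart reply comes back as a download stream.
+ #
+ #     stream = service_ops.submit_batch(
+ #         body=batch_payload,
+ #         content_length=len(batch_payload),
+ #         multipart_content_type='multipart/mixed; boundary=batch_example')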
+
+ def filter_blobs(self, timeout=None, request_id=None, where=None, marker=None, maxresults=None, cls=None, **kwargs):
+ """The Filter Blobs operation enables callers to list blobs across all
+ containers whose tags match a given search expression. Filter blobs
+ searches across all containers within a storage account but can be
+ scoped within the expression to a single container.
+
+ :param timeout: The timeout parameter is expressed in seconds. For
+ more information, see Setting
+ Timeouts for Blob Service Operations.
+ :type timeout: int
+ :param request_id: Provides a client-generated, opaque value with a 1
+ KB character limit that is recorded in the analytics logs when storage
+ analytics logging is enabled.
+ :type request_id: str
+        :param where: Filters the results to return only blobs whose tags
+         match the specified expression.
+ :type where: str
+ :param marker: A string value that identifies the portion of the list
+ of containers to be returned with the next listing operation. The
+ operation returns the NextMarker value within the response body if the
+ listing operation did not return all containers remaining to be listed
+ with the current page. The NextMarker value can be used as the value
+ for the marker parameter in a subsequent call to request the next page
+ of list items. The marker value is opaque to the client.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of containers to
+ return. If the request does not specify maxresults, or specifies a
+ value greater than 5000, the server will return up to 5000 items. Note
+ that if the listing operation crosses a partition boundary, then the
+ service will return a continuation token for retrieving the remainder
+ of the results. For this reason, it is possible that the service will
+ return fewer results than specified by maxresults, or than the default
+ of 5000.
+ :type maxresults: int
+ :param callable cls: A custom type or function that will be passed the
+ direct response
+ :return: FilterBlobSegment or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.FilterBlobSegment
+ :raises:
+ :class:`StorageErrorException`
+ """
+ error_map = kwargs.pop('error_map', None)
+ comp = "blobs"
+
+ # Construct URL
+ url = self.filter_blobs.metadata['url']
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {}
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if where is not None:
+ query_parameters['where'] = self._serialize.query("where", where, 'str')
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if maxresults is not None:
+ query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+ # Construct headers
+ header_parameters = {}
+ header_parameters['Accept'] = 'application/xml'
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
+
+ # Construct and send request
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise models.StorageErrorException(response, self._deserialize)
+
+ header_dict = {}
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('FilterBlobSegment', response)
+ header_dict = {
+ 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
+ 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
+ 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
+ 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
+ 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
+ }
+
+ if cls:
+ return cls(response, deserialized, header_dict)
+
+ return deserialized
+ filter_blobs.metadata = {'url': '/'}
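The generated `filter_blobs` operation above is what the convenience layer builds on for tag-based queries. A minimal usage sketch, not part of this diff, assuming the vendored package mirrors the public azure-storage-blob 12.4+ surface (where `BlobServiceClient.find_blobs_by_tags` wraps this call); the connection-string environment variable and the tag expression are placeholders:
```
import os
from azure.storage.blob import BlobServiceClient  # or the vendored import path when used in-tree

service = BlobServiceClient.from_connection_string(os.environ["AZURE_STORAGE_CONNECTION_STRING"])

# Filter blobs across all containers whose tags match a SQL-like expression.
for blob in service.find_blobs_by_tags("\"project\"='storage-preview'"):
    print(blob.container_name, blob.name)
```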
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/version.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/version.py
new file mode 100644
index 00000000000..6ef707dd11c
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_generated/version.py
@@ -0,0 +1,13 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+VERSION = "2020-02-10"
+
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_lease.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_lease.py
new file mode 100644
index 00000000000..1fd668c0f9b
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_lease.py
@@ -0,0 +1,331 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import uuid
+
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, TypeVar, TYPE_CHECKING
+)
+
+from azure.core.tracing.decorator import distributed_trace
+
+from ._shared.response_handlers import return_response_headers, process_storage_error
+from ._generated.models import StorageErrorException
+from ._serialize import get_modify_conditions
+
+if TYPE_CHECKING:
+ from datetime import datetime
+
+ BlobClient = TypeVar("BlobClient")
+ ContainerClient = TypeVar("ContainerClient")
+
+
+class BlobLeaseClient(object):
+ """Creates a new BlobLeaseClient.
+
+ This client provides lease operations on a BlobClient or ContainerClient.
+
+ :ivar str id:
+ The ID of the lease currently being maintained. This will be `None` if no
+ lease has yet been acquired.
+ :ivar str etag:
+ The ETag of the lease currently being maintained. This will be `None` if no
+ lease has yet been acquired or modified.
+ :ivar ~datetime.datetime last_modified:
+ The last modified timestamp of the lease currently being maintained.
+ This will be `None` if no lease has yet been acquired or modified.
+
+ :param client:
+ The client of the blob or container to lease.
+ :type client: ~azure.storage.blob.BlobClient or
+ ~azure.storage.blob.ContainerClient
+ :param str lease_id:
+ A string representing the lease ID of an existing lease. This value does not
+ need to be specified in order to acquire a new lease, or break one.
+ """
+ def __init__(
+ self, client, lease_id=None
+ ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
+ # type: (Union[BlobClient, ContainerClient], Optional[str]) -> None
+ self.id = lease_id or str(uuid.uuid4())
+ self.last_modified = None
+ self.etag = None
+ if hasattr(client, 'blob_name'):
+ self._client = client._client.blob # type: ignore # pylint: disable=protected-access
+ elif hasattr(client, 'container_name'):
+ self._client = client._client.container # type: ignore # pylint: disable=protected-access
+ else:
+ raise TypeError("Lease must use either BlobClient or ContainerClient.")
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.release()
+
+ @distributed_trace
+ def acquire(self, lease_duration=-1, **kwargs):
+ # type: (int, **Any) -> None
+ """Requests a new lease.
+
+ If the container does not have an active lease, the Blob service creates a
+ lease on the container and returns a new lease ID.
+
+ :param int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires. A non-infinite lease can be
+ between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change. Default is -1 (infinite lease).
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ response = self._client.acquire_lease(
+ timeout=kwargs.pop('timeout', None),
+ duration=lease_duration,
+ proposed_lease_id=self.id,
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+ self.etag = response.get('etag') # type: str
+
+ @distributed_trace
+ def renew(self, **kwargs):
+ # type: (Any) -> None
+ """Renews the lease.
+
+ The lease can be renewed if the lease ID specified in the
+ lease client matches that associated with the container or blob. Note that
+ the lease may be renewed even if it has expired as long as the container
+ or blob has not been leased again since the expiration of that lease. When you
+ renew a lease, the lease duration clock resets.
+
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ response = self._client.renew_lease(
+ lease_id=self.id,
+ timeout=kwargs.pop('timeout', None),
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ self.etag = response.get('etag') # type: str
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+
+ @distributed_trace
+ def release(self, **kwargs):
+ # type: (Any) -> None
+ """Release the lease.
+
+ The lease may be released if the client lease id specified matches
+ that associated with the container or blob. Releasing the lease allows another client
+ to immediately acquire the lease for the container or blob as soon as the release is complete.
+
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ response = self._client.release_lease(
+ lease_id=self.id,
+ timeout=kwargs.pop('timeout', None),
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ self.etag = response.get('etag') # type: str
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+
+ @distributed_trace
+ def change(self, proposed_lease_id, **kwargs):
+ # type: (str, Any) -> None
+ """Change the lease ID of an active lease.
+
+ :param str proposed_lease_id:
+ Proposed lease ID, in a GUID string format. The Blob service returns 400
+ (Invalid request) if the proposed lease ID is not in the correct format.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ response = self._client.change_lease(
+ lease_id=self.id,
+ proposed_lease_id=proposed_lease_id,
+ timeout=kwargs.pop('timeout', None),
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ self.etag = response.get('etag') # type: str
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+
+ @distributed_trace
+ def break_lease(self, lease_break_period=None, **kwargs):
+ # type: (Optional[int], Any) -> int
+ """Break the lease, if the container or blob has an active lease.
+
+ Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
+ the request is not required to specify a matching lease ID. When a lease
+ is broken, the lease break period is allowed to elapse, during which time
+ no lease operation except break and release can be performed on the container or blob.
+ When a lease is successfully broken, the response indicates the interval
+ in seconds until a new lease can be acquired.
+
+ :param int lease_break_period:
+ This is the proposed duration of seconds that the lease
+ should continue before it is broken, between 0 and 60 seconds. This
+ break period is only used if it is shorter than the time remaining
+ on the lease. If longer, the time remaining on the lease is used.
+ A new lease will not be available before the break period has
+ expired, but the lease may be held for longer than the break
+ period. If this header does not appear with a break
+ operation, a fixed-duration lease breaks after the remaining lease
+ period elapses, and an infinite lease breaks immediately.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Approximate time remaining in the lease period, in seconds.
+ :rtype: int
+ """
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ response = self._client.break_lease(
+ timeout=kwargs.pop('timeout', None),
+ break_period=lease_break_period,
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return response.get('lease_time') # type: ignore
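A minimal sketch of how the `BlobLeaseClient` above is typically driven through a blob client, not part of this diff and assuming the vendored package mirrors the public azure-storage-blob 12.x surface; the connection string, container, and blob names are placeholders:
```
import os
from azure.storage.blob import BlobClient  # or the vendored import path when used in-tree

blob = BlobClient.from_connection_string(
    os.environ["AZURE_STORAGE_CONNECTION_STRING"],
    container_name="mycontainer",
    blob_name="test.txt",
)
blob.upload_blob(b"hello", overwrite=True)

# acquire_lease returns a BlobLeaseClient; writes without the lease id now fail.
lease = blob.acquire_lease(lease_duration=30)
try:
    blob.set_blob_metadata({"owner": "lease-demo"}, lease=lease)
finally:
    # Release so other clients can acquire the lease again immediately.
    lease.release()
```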
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_list_blobs_helper.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_list_blobs_helper.py
new file mode 100644
index 00000000000..f1dd70f992c
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_list_blobs_helper.py
@@ -0,0 +1,166 @@
+# pylint: disable=too-many-lines
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from azure.core.paging import PageIterator, ItemPaged
+from ._deserialize import get_blob_properties_from_generated_code
+from ._generated.models import StorageErrorException, BlobItemInternal, BlobPrefix as GenBlobPrefix
+from ._models import BlobProperties
+from ._shared.models import DictMixin
+from ._shared.response_handlers import return_context_and_deserialized, process_storage_error
+
+
+class BlobPropertiesPaged(PageIterator):
+ """An Iterable of Blob properties.
+
+ :ivar str service_endpoint: The service URL.
+ :ivar str prefix: A blob name prefix being used to filter the list.
+ :ivar str marker: The continuation token of the current page of results.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str continuation_token: The continuation token to retrieve the next page of results.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar current_page: The current page of listed results.
+ :vartype current_page: list(~azure.storage.blob.BlobProperties)
+ :ivar str container: The container that the blobs are listed from.
+ :ivar str delimiter: A delimiting character used for hierarchy listing.
+
+ :param callable command: Function to retrieve the next page of items.
+ :param str container: The name of the container.
+ :param str prefix: Filters the results to return only blobs whose names
+ begin with the specified prefix.
+ :param int results_per_page: The maximum number of blobs to retrieve per
+ call.
+ :param str continuation_token: An opaque continuation token.
+ :param str delimiter:
+ Used to capture blobs whose names begin with the same substring up to
+ the appearance of the delimiter character. The delimiter may be a single
+ character or a string.
+ :param location_mode: Specifies the location the request should be sent to.
+ This mode only applies for RA-GRS accounts which allow secondary read access.
+ Options include 'primary' or 'secondary'.
+ """
+ def __init__(
+ self, command,
+ container=None,
+ prefix=None,
+ results_per_page=None,
+ continuation_token=None,
+ delimiter=None,
+ location_mode=None):
+ super(BlobPropertiesPaged, self).__init__(
+ get_next=self._get_next_cb,
+ extract_data=self._extract_data_cb,
+ continuation_token=continuation_token or ""
+ )
+ self._command = command
+ self.service_endpoint = None
+ self.prefix = prefix
+ self.marker = None
+ self.results_per_page = results_per_page
+ self.container = container
+ self.delimiter = delimiter
+ self.current_page = None
+ self.location_mode = location_mode
+
+ def _get_next_cb(self, continuation_token):
+ try:
+ return self._command(
+ prefix=self.prefix,
+ marker=continuation_token or None,
+ maxresults=self.results_per_page,
+ cls=return_context_and_deserialized,
+ use_location=self.location_mode)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _extract_data_cb(self, get_next_return):
+ self.location_mode, self._response = get_next_return
+ self.service_endpoint = self._response.service_endpoint
+ self.prefix = self._response.prefix
+ self.marker = self._response.marker
+ self.results_per_page = self._response.max_results
+ self.container = self._response.container_name
+ self.current_page = [self._build_item(item) for item in self._response.segment.blob_items]
+
+ return self._response.next_marker or None, self.current_page
+
+ def _build_item(self, item):
+ if isinstance(item, BlobProperties):
+ return item
+ if isinstance(item, BlobItemInternal):
+ blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access
+ blob.container = self.container
+ return blob
+ return item
+
+
+class BlobPrefixPaged(BlobPropertiesPaged):
+ def __init__(self, *args, **kwargs):
+ super(BlobPrefixPaged, self).__init__(*args, **kwargs)
+ self.name = self.prefix
+
+ def _extract_data_cb(self, get_next_return):
+ continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return)
+ self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items
+ self.current_page = [self._build_item(item) for item in self.current_page]
+ self.delimiter = self._response.delimiter
+
+ return continuation_token, self.current_page
+
+ def _build_item(self, item):
+ item = super(BlobPrefixPaged, self)._build_item(item)
+ if isinstance(item, GenBlobPrefix):
+ return BlobPrefix(
+ self._command,
+ container=self.container,
+ prefix=item.name,
+ results_per_page=self.results_per_page,
+ location_mode=self.location_mode)
+ return item
+
+
+class BlobPrefix(ItemPaged, DictMixin):
+ """An Iterable of Blob properties.
+
+ Returned from walk_blobs when a delimiter is used.
+ Can be thought of as a virtual blob directory.
+
+ :ivar str name: The prefix, or "directory name" of the blob.
+ :ivar str service_endpoint: The service URL.
+ :ivar str prefix: A blob name prefix being used to filter the list.
+ :ivar str marker: The continuation token of the current page of results.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str next_marker: The continuation token to retrieve the next page of results.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar current_page: The current page of listed results.
+ :vartype current_page: list(~azure.storage.blob.BlobProperties)
+ :ivar str container: The container that the blobs are listed from.
+ :ivar str delimiter: A delimiting character used for hierarchy listing.
+
+ :param callable command: Function to retrieve the next page of items.
+ :param str prefix: Filters the results to return only blobs whose names
+ begin with the specified prefix.
+ :param int results_per_page: The maximum number of blobs to retrieve per
+ call.
+ :param str marker: An opaque continuation token.
+ :param str delimiter:
+ Used to capture blobs whose names begin with the same substring up to
+ the appearance of the delimiter character. The delimiter may be a single
+ character or a string.
+ :param location_mode: Specifies the location the request should be sent to.
+ This mode only applies for RA-GRS accounts which allow secondary read access.
+ Options include 'primary' or 'secondary'.
+ """
+ def __init__(self, *args, **kwargs):
+ super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs)
+ self.name = kwargs.get('prefix')
+ self.prefix = kwargs.get('prefix')
+ self.results_per_page = kwargs.get('results_per_page')
+ self.container = kwargs.get('container')
+ self.delimiter = kwargs.get('delimiter')
+ self.location_mode = kwargs.get('location_mode')
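A minimal sketch of the hierarchy listing these paging helpers enable, not part of this diff and assuming the vendored package mirrors the public azure-storage-blob 12.x surface, where `ContainerClient.walk_blobs` yields a `BlobPrefix` for each "virtual directory"; the connection string and container name are placeholders:
```
import os
from azure.storage.blob import ContainerClient  # or the vendored import path when used in-tree

container = ContainerClient.from_connection_string(
    os.environ["AZURE_STORAGE_CONNECTION_STRING"], container_name="mycontainer"
)

def walk(pages, indent=0):
    for item in pages:
        print(" " * indent + item.name)
        # A BlobPrefix (virtual directory) is itself iterable and lists its own prefix.
        if hasattr(item, "prefix"):
            walk(item, indent + 2)

walk(container.walk_blobs(delimiter="/"))
```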
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_models.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_models.py
new file mode 100644
index 00000000000..a7658cc5e3e
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_models.py
@@ -0,0 +1,1173 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+
+from enum import Enum
+
+from azure.core.paging import PageIterator
+from ._generated.models import FilterBlobItem, ArrowField
+
+from ._shared import decode_base64_to_text
+from ._shared.response_handlers import return_context_and_deserialized, process_storage_error
+from ._shared.models import DictMixin, get_enum_value
+from ._generated.models import Logging as GeneratedLogging
+from ._generated.models import Metrics as GeneratedMetrics
+from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy
+from ._generated.models import StaticWebsite as GeneratedStaticWebsite
+from ._generated.models import CorsRule as GeneratedCorsRule
+from ._generated.models import AccessPolicy as GenAccessPolicy
+from ._generated.models import StorageErrorException
+
+
+class BlobType(str, Enum):
+
+ BlockBlob = "BlockBlob"
+ PageBlob = "PageBlob"
+ AppendBlob = "AppendBlob"
+
+
+class BlockState(str, Enum):
+ """Block blob block types."""
+
+ Committed = 'Committed' #: Committed blocks.
+ Latest = 'Latest' #: Latest blocks.
+ Uncommitted = 'Uncommitted' #: Uncommitted blocks.
+
+
+class StandardBlobTier(str, Enum):
+ """
+ Specifies the blob tier to set the blob to. This is only applicable for
+ block blobs on standard storage accounts.
+ """
+
+ Archive = 'Archive' #: Archive
+ Cool = 'Cool' #: Cool
+ Hot = 'Hot' #: Hot
+
+
+class PremiumPageBlobTier(str, Enum):
+ """
+ Specifies the page blob tier to set the blob to. This is only applicable to page
+ blobs on premium storage accounts. Please take a look at:
+ https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets
+ for detailed information on the corresponding IOPS and throughput per PageBlobTier.
+ """
+
+ P4 = 'P4' #: P4 Tier
+ P6 = 'P6' #: P6 Tier
+ P10 = 'P10' #: P10 Tier
+ P20 = 'P20' #: P20 Tier
+ P30 = 'P30' #: P30 Tier
+ P40 = 'P40' #: P40 Tier
+ P50 = 'P50' #: P50 Tier
+ P60 = 'P60' #: P60 Tier
+
+
+class SequenceNumberAction(str, Enum):
+ """Sequence number actions."""
+
+ Increment = 'increment'
+ """
+ Increments the value of the sequence number by 1. If specifying this option,
+ do not include the x-ms-blob-sequence-number header.
+ """
+
+ Max = 'max'
+ """
+ Sets the sequence number to be the higher of the value included with the
+ request and the value currently stored for the blob.
+ """
+
+ Update = 'update'
+ """Sets the sequence number to the value included with the request."""
+
+
+class PublicAccess(str, Enum):
+ """
+ Specifies whether data in the container may be accessed publicly and the level of access.
+ """
+
+ OFF = 'off'
+ """
+ Specifies that there is no public read access for both the container and blobs within the container.
+    Clients cannot enumerate the containers within the storage account, nor the blobs within the container.
+ """
+
+ Blob = 'blob'
+ """
+ Specifies public read access for blobs. Blob data within this container can be read
+ via anonymous request, but container data is not available. Clients cannot enumerate
+ blobs within the container via anonymous request.
+ """
+
+ Container = 'container'
+ """
+ Specifies full public read access for container and blob data. Clients can enumerate
+ blobs within the container via anonymous request, but cannot enumerate containers
+ within the storage account.
+ """
+
+
+class BlobAnalyticsLogging(GeneratedLogging):
+ """Azure Analytics Logging settings.
+
+ :keyword str version:
+ The version of Storage Analytics to configure. The default value is 1.0.
+ :keyword bool delete:
+ Indicates whether all delete requests should be logged. The default value is `False`.
+ :keyword bool read:
+ Indicates whether all read requests should be logged. The default value is `False`.
+ :keyword bool write:
+ Indicates whether all write requests should be logged. The default value is `False`.
+ :keyword ~azure.storage.blob.RetentionPolicy retention_policy:
+ Determines how long the associated data should persist. If not specified the retention
+ policy will be disabled by default.
+ """
+
+ def __init__(self, **kwargs):
+ self.version = kwargs.get('version', u'1.0')
+ self.delete = kwargs.get('delete', False)
+ self.read = kwargs.get('read', False)
+ self.write = kwargs.get('write', False)
+ self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
+
+ @classmethod
+ def _from_generated(cls, generated):
+ if not generated:
+ return cls()
+ return cls(
+ version=generated.version,
+ delete=generated.delete,
+ read=generated.read,
+ write=generated.write,
+ retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access
+ )
+
+
+class Metrics(GeneratedMetrics):
+ """A summary of request statistics grouped by API in hour or minute aggregates
+ for blobs.
+
+ :keyword str version:
+ The version of Storage Analytics to configure. The default value is 1.0.
+ :keyword bool enabled:
+ Indicates whether metrics are enabled for the Blob service.
+ The default value is `False`.
+ :keyword bool include_apis:
+ Indicates whether metrics should generate summary statistics for called API operations.
+ :keyword ~azure.storage.blob.RetentionPolicy retention_policy:
+ Determines how long the associated data should persist. If not specified the retention
+ policy will be disabled by default.
+ """
+
+ def __init__(self, **kwargs):
+ self.version = kwargs.get('version', u'1.0')
+ self.enabled = kwargs.get('enabled', False)
+ self.include_apis = kwargs.get('include_apis')
+ self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
+
+ @classmethod
+ def _from_generated(cls, generated):
+ if not generated:
+ return cls()
+ return cls(
+ version=generated.version,
+ enabled=generated.enabled,
+ include_apis=generated.include_apis,
+ retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access
+ )
+
+
+class RetentionPolicy(GeneratedRetentionPolicy):
+ """The retention policy which determines how long the associated data should
+ persist.
+
+ :param bool enabled:
+ Indicates whether a retention policy is enabled for the storage service.
+ The default value is False.
+ :param int days:
+ Indicates the number of days that metrics or logging or
+ soft-deleted data should be retained. All data older than this value will
+ be deleted. If enabled=True, the number of days must be specified.
+ """
+
+ def __init__(self, enabled=False, days=None):
+ self.enabled = enabled
+ self.days = days
+ if self.enabled and (self.days is None):
+ raise ValueError("If policy is enabled, 'days' must be specified.")
+
+ @classmethod
+ def _from_generated(cls, generated):
+ if not generated:
+ return cls()
+ return cls(
+ enabled=generated.enabled,
+ days=generated.days,
+ )
+
+
+class StaticWebsite(GeneratedStaticWebsite):
+ """The properties that enable an account to host a static website.
+
+ :keyword bool enabled:
+ Indicates whether this account is hosting a static website.
+ The default value is `False`.
+ :keyword str index_document:
+ The default name of the index page under each directory.
+ :keyword str error_document404_path:
+ The absolute path of the custom 404 page.
+ :keyword str default_index_document_path:
+ Absolute path of the default index page.
+ """
+
+ def __init__(self, **kwargs):
+ self.enabled = kwargs.get('enabled', False)
+ if self.enabled:
+ self.index_document = kwargs.get('index_document')
+ self.error_document404_path = kwargs.get('error_document404_path')
+ self.default_index_document_path = kwargs.get('default_index_document_path')
+ else:
+ self.index_document = None
+ self.error_document404_path = None
+ self.default_index_document_path = None
+
+ @classmethod
+ def _from_generated(cls, generated):
+ if not generated:
+ return cls()
+ return cls(
+ enabled=generated.enabled,
+ index_document=generated.index_document,
+ error_document404_path=generated.error_document404_path,
+ default_index_document_path=generated.default_index_document_path
+ )
+
+
+class CorsRule(GeneratedCorsRule):
+ """CORS is an HTTP feature that enables a web application running under one
+ domain to access resources in another domain. Web browsers implement a
+ security restriction known as same-origin policy that prevents a web page
+ from calling APIs in a different domain; CORS provides a secure way to
+ allow one domain (the origin domain) to call APIs in another domain.
+
+ :param list(str) allowed_origins:
+ A list of origin domains that will be allowed via CORS, or "*" to allow
+        all domains. The list must contain at least one entry. Limited to 64
+ origin domains. Each allowed origin can have up to 256 characters.
+ :param list(str) allowed_methods:
+ A list of HTTP methods that are allowed to be executed by the origin.
+        The list must contain at least one entry. For Azure Storage,
+ permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
+ :keyword list(str) allowed_headers:
+ Defaults to an empty list. A list of headers allowed to be part of
+ the cross-origin request. Limited to 64 defined headers and 2 prefixed
+ headers. Each header can be up to 256 characters.
+ :keyword list(str) exposed_headers:
+ Defaults to an empty list. A list of response headers to expose to CORS
+ clients. Limited to 64 defined headers and two prefixed headers. Each
+ header can be up to 256 characters.
+ :keyword int max_age_in_seconds:
+ The number of seconds that the client/browser should cache a
+ preflight response.
+ """
+
+ def __init__(self, allowed_origins, allowed_methods, **kwargs):
+ self.allowed_origins = ','.join(allowed_origins)
+ self.allowed_methods = ','.join(allowed_methods)
+ self.allowed_headers = ','.join(kwargs.get('allowed_headers', []))
+ self.exposed_headers = ','.join(kwargs.get('exposed_headers', []))
+ self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0)
+
+ @classmethod
+ def _from_generated(cls, generated):
+ return cls(
+ [generated.allowed_origins],
+ [generated.allowed_methods],
+ allowed_headers=[generated.allowed_headers],
+ exposed_headers=[generated.exposed_headers],
+ max_age_in_seconds=generated.max_age_in_seconds,
+ )
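A minimal sketch showing how the service-properties models above (`BlobAnalyticsLogging`, `Metrics`, `RetentionPolicy`, `CorsRule`) are typically passed to `BlobServiceClient.set_service_properties`, not part of this diff and assuming the vendored package mirrors the public azure-storage-blob 12.x surface; the connection string, retention window, and CORS origin are placeholders:
```
import os
from azure.storage.blob import (
    BlobAnalyticsLogging, BlobServiceClient, CorsRule, Metrics, RetentionPolicy,
)

service = BlobServiceClient.from_connection_string(os.environ["AZURE_STORAGE_CONNECTION_STRING"])

# Keep logs and hourly metrics for 7 days, and allow simple cross-origin GETs.
retention = RetentionPolicy(enabled=True, days=7)
service.set_service_properties(
    analytics_logging=BlobAnalyticsLogging(read=True, write=True, delete=True, retention_policy=retention),
    hour_metrics=Metrics(enabled=True, include_apis=True, retention_policy=retention),
    cors=[CorsRule(["https://contoso.example"], ["GET"])],
)
```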
+
+
+class ContainerProperties(DictMixin):
+ """Blob container's properties class.
+
+ Returned ``ContainerProperties`` instances expose these values through a
+ dictionary interface, for example: ``container_props["last_modified"]``.
+ Additionally, the container name is available as ``container_props["name"]``.
+
+ :ivar ~datetime.datetime last_modified:
+ A datetime object representing the last time the container was modified.
+ :ivar str etag:
+ The ETag contains a value that you can use to perform operations
+ conditionally.
+ :ivar ~azure.storage.blob.LeaseProperties lease:
+ Stores all the lease information for the container.
+ :ivar str public_access: Specifies whether data in the container may be accessed
+ publicly and the level of access.
+ :ivar bool has_immutability_policy:
+ Represents whether the container has an immutability policy.
+ :ivar bool has_legal_hold:
+ Represents whether the container has a legal hold.
+ :ivar dict metadata: A dict with name-value pairs to associate with the
+ container as metadata.
+ :ivar ~azure.storage.blob.ContainerEncryptionScope encryption_scope:
+ The default encryption scope configuration for the container.
+ """
+
+ def __init__(self, **kwargs):
+ self.name = None
+ self.last_modified = kwargs.get('Last-Modified')
+ self.etag = kwargs.get('ETag')
+ self.lease = LeaseProperties(**kwargs)
+ self.public_access = kwargs.get('x-ms-blob-public-access')
+ self.has_immutability_policy = kwargs.get('x-ms-has-immutability-policy')
+ self.deleted = None
+ self.version = None
+ self.has_legal_hold = kwargs.get('x-ms-has-legal-hold')
+ self.metadata = kwargs.get('metadata')
+ self.encryption_scope = None
+ default_encryption_scope = kwargs.get('x-ms-default-encryption-scope')
+ if default_encryption_scope:
+ self.encryption_scope = ContainerEncryptionScope(
+ default_encryption_scope=default_encryption_scope,
+ prevent_encryption_scope_override=kwargs.get('x-ms-deny-encryption-scope-override', False)
+ )
+
+ @classmethod
+ def _from_generated(cls, generated):
+ props = cls()
+ props.name = generated.name
+ props.last_modified = generated.properties.last_modified
+ props.etag = generated.properties.etag
+ props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access
+ props.public_access = generated.properties.public_access
+ props.has_immutability_policy = generated.properties.has_immutability_policy
+ props.deleted = generated.deleted
+ props.version = generated.version
+ props.has_legal_hold = generated.properties.has_legal_hold
+ props.metadata = generated.metadata
+ props.encryption_scope = ContainerEncryptionScope._from_generated(generated) #pylint: disable=protected-access
+ return props
+
+
+class ContainerPropertiesPaged(PageIterator):
+ """An Iterable of Container properties.
+
+ :ivar str service_endpoint: The service URL.
+ :ivar str prefix: A container name prefix being used to filter the list.
+ :ivar str marker: The continuation token of the current page of results.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str continuation_token: The continuation token to retrieve the next page of results.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar current_page: The current page of listed results.
+ :vartype current_page: list(~azure.storage.blob.ContainerProperties)
+
+ :param callable command: Function to retrieve the next page of items.
+ :param str prefix: Filters the results to return only containers whose names
+ begin with the specified prefix.
+ :param int results_per_page: The maximum number of container names to retrieve per
+ call.
+ :param str continuation_token: An opaque continuation token.
+ """
+ def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
+ super(ContainerPropertiesPaged, self).__init__(
+ get_next=self._get_next_cb,
+ extract_data=self._extract_data_cb,
+ continuation_token=continuation_token or ""
+ )
+ self._command = command
+ self.service_endpoint = None
+ self.prefix = prefix
+ self.marker = None
+ self.results_per_page = results_per_page
+ self.location_mode = None
+ self.current_page = []
+
+ def _get_next_cb(self, continuation_token):
+ try:
+ return self._command(
+ marker=continuation_token or None,
+ maxresults=self.results_per_page,
+ cls=return_context_and_deserialized,
+ use_location=self.location_mode)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _extract_data_cb(self, get_next_return):
+ self.location_mode, self._response = get_next_return
+ self.service_endpoint = self._response.service_endpoint
+ self.prefix = self._response.prefix
+ self.marker = self._response.marker
+ self.results_per_page = self._response.max_results
+ self.current_page = [self._build_item(item) for item in self._response.container_items]
+
+ return self._response.next_marker or None, self.current_page
+
+ @staticmethod
+ def _build_item(item):
+ return ContainerProperties._from_generated(item) # pylint: disable=protected-access
+
+
+class BlobProperties(DictMixin):
+ """
+ Blob Properties.
+
+ :ivar str name:
+ The name of the blob.
+ :ivar str container:
+ The container in which the blob resides.
+ :ivar str snapshot:
+ Datetime value that uniquely identifies the blob snapshot.
+    :ivar ~azure.storage.blob.BlobType blob_type:
+ String indicating this blob's type.
+ :ivar dict metadata:
+ Name-value pairs associated with the blob as metadata.
+ :ivar ~datetime.datetime last_modified:
+ A datetime object representing the last time the blob was modified.
+ :ivar str etag:
+ The ETag contains a value that you can use to perform operations
+ conditionally.
+ :ivar int size:
+ The size of the content returned. If the entire blob was requested,
+ the length of blob in bytes. If a subset of the blob was requested, the
+ length of the returned subset.
+ :ivar str content_range:
+ Indicates the range of bytes returned in the event that the client
+ requested a subset of the blob.
+ :ivar int append_blob_committed_block_count:
+ (For Append Blobs) Number of committed blocks in the blob.
+ :ivar bool is_append_blob_sealed:
+ Indicate if the append blob is sealed or not.
+
+ .. versionadded:: 12.4.0
+
+ :ivar int page_blob_sequence_number:
+ (For Page Blobs) Sequence number for page blob used for coordinating
+ concurrent writes.
+ :ivar bool server_encrypted:
+ Set to true if the blob is encrypted on the server.
+ :ivar ~azure.storage.blob.CopyProperties copy:
+ Stores all the copy properties for the blob.
+ :ivar ~azure.storage.blob.ContentSettings content_settings:
+ Stores all the content settings for the blob.
+ :ivar ~azure.storage.blob.LeaseProperties lease:
+ Stores all the lease information for the blob.
+ :ivar ~azure.storage.blob.StandardBlobTier blob_tier:
+ Indicates the access tier of the blob. The hot tier is optimized
+ for storing data that is accessed frequently. The cool storage tier
+ is optimized for storing data that is infrequently accessed and stored
+ for at least a month. The archive tier is optimized for storing
+ data that is rarely accessed and stored for at least six months
+ with flexible latency requirements.
+ :ivar str rehydrate_priority:
+        Indicates the priority with which to rehydrate an archived blob.
+ :ivar ~datetime.datetime blob_tier_change_time:
+ Indicates when the access tier was last changed.
+ :ivar bool blob_tier_inferred:
+ Indicates whether the access tier was inferred by the service.
+ If false, it indicates that the tier was set explicitly.
+ :ivar bool deleted:
+ Whether this blob was deleted.
+ :ivar ~datetime.datetime deleted_time:
+ A datetime object representing the time at which the blob was deleted.
+ :ivar int remaining_retention_days:
+ The number of days that the blob will be retained before being permanently deleted by the service.
+ :ivar ~datetime.datetime creation_time:
+ Indicates when the blob was created, in UTC.
+ :ivar str archive_status:
+ Archive status of blob.
+ :ivar str encryption_key_sha256:
+ The SHA-256 hash of the provided encryption key.
+ :ivar str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+ :ivar bool request_server_encrypted:
+ Whether this blob is encrypted.
+ :ivar list(~azure.storage.blob.ObjectReplicationPolicy) object_replication_source_properties:
+ Only present for blobs that have policy ids and rule ids applied to them.
+
+ .. versionadded:: 12.4.0
+
+ :ivar str object_replication_destination_policy:
+ Represents the Object Replication Policy Id that created this blob.
+
+ .. versionadded:: 12.4.0
+
+ :ivar ~datetime.datetime last_accessed_on:
+ Indicates when the last Read/Write operation was performed on a Blob.
+
+ .. versionadded:: 12.6.0
+
+ :ivar int tag_count:
+ Tags count on this blob.
+
+ .. versionadded:: 12.4.0
+
+ :ivar dict(str, str) tags:
+ Key value pair of tags on this blob.
+
+ .. versionadded:: 12.4.0
+
+ """
+
+ def __init__(self, **kwargs):
+ self.name = kwargs.get('name')
+ self.container = None
+ self.snapshot = kwargs.get('x-ms-snapshot')
+ self.version_id = kwargs.get('x-ms-version-id')
+ self.is_current_version = kwargs.get('x-ms-is-current-version')
+ self.blob_type = BlobType(kwargs['x-ms-blob-type']) if kwargs.get('x-ms-blob-type') else None
+ self.metadata = kwargs.get('metadata')
+ self.encrypted_metadata = kwargs.get('encrypted_metadata')
+ self.last_modified = kwargs.get('Last-Modified')
+ self.etag = kwargs.get('ETag')
+ self.size = kwargs.get('Content-Length')
+ self.content_range = kwargs.get('Content-Range')
+ self.append_blob_committed_block_count = kwargs.get('x-ms-blob-committed-block-count')
+ self.is_append_blob_sealed = kwargs.get('x-ms-blob-sealed')
+ self.page_blob_sequence_number = kwargs.get('x-ms-blob-sequence-number')
+ self.server_encrypted = kwargs.get('x-ms-server-encrypted')
+ self.copy = CopyProperties(**kwargs)
+ self.content_settings = ContentSettings(**kwargs)
+ self.lease = LeaseProperties(**kwargs)
+ self.blob_tier = kwargs.get('x-ms-access-tier')
+ self.rehydrate_priority = kwargs.get('x-ms-rehydrate-priority')
+ self.blob_tier_change_time = kwargs.get('x-ms-access-tier-change-time')
+ self.blob_tier_inferred = kwargs.get('x-ms-access-tier-inferred')
+ self.deleted = False
+ self.deleted_time = None
+ self.remaining_retention_days = None
+ self.creation_time = kwargs.get('x-ms-creation-time')
+ self.archive_status = kwargs.get('x-ms-archive-status')
+ self.encryption_key_sha256 = kwargs.get('x-ms-encryption-key-sha256')
+ self.encryption_scope = kwargs.get('x-ms-encryption-scope')
+ self.request_server_encrypted = kwargs.get('x-ms-server-encrypted')
+ self.object_replication_source_properties = kwargs.get('object_replication_source_properties')
+ self.object_replication_destination_policy = kwargs.get('x-ms-or-policy-id')
+ self.last_accessed_on = kwargs.get('x-ms-last-access-time')
+ self.tag_count = kwargs.get('x-ms-tag-count')
+ self.tags = None
+
+
+class FilteredBlob(DictMixin):
+ """Blob info from a Filter Blobs API call.
+
+ :ivar name: Blob name
+ :type name: str
+ :ivar container_name: Container name.
+ :type container_name: str
+ """
+ def __init__(self, **kwargs):
+ self.name = kwargs.get('name', None)
+ self.container_name = kwargs.get('container_name', None)
+
+
+class FilteredBlobPaged(PageIterator):
+ """An Iterable of Blob properties.
+
+ :ivar str service_endpoint: The service URL.
+ :ivar str prefix: A blob name prefix being used to filter the list.
+ :ivar str marker: The continuation token of the current page of results.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str continuation_token: The continuation token to retrieve the next page of results.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar current_page: The current page of listed results.
+ :vartype current_page: list(~azure.storage.blob.FilteredBlob)
+ :ivar str container: The container that the blobs are listed from.
+
+ :param callable command: Function to retrieve the next page of items.
+ :param str container: The name of the container.
+ :param int results_per_page: The maximum number of blobs to retrieve per
+ call.
+ :param str continuation_token: An opaque continuation token.
+ :param location_mode: Specifies the location the request should be sent to.
+ This mode only applies for RA-GRS accounts which allow secondary read access.
+ Options include 'primary' or 'secondary'.
+ """
+ def __init__(
+ self, command,
+ container=None,
+ results_per_page=None,
+ continuation_token=None,
+ location_mode=None):
+ super(FilteredBlobPaged, self).__init__(
+ get_next=self._get_next_cb,
+ extract_data=self._extract_data_cb,
+ continuation_token=continuation_token or ""
+ )
+ self._command = command
+ self.service_endpoint = None
+ self.marker = continuation_token
+ self.results_per_page = results_per_page
+ self.container = container
+ self.current_page = None
+ self.location_mode = location_mode
+
+ def _get_next_cb(self, continuation_token):
+ try:
+ return self._command(
+ marker=continuation_token or None,
+ maxresults=self.results_per_page,
+ cls=return_context_and_deserialized,
+ use_location=self.location_mode)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ def _extract_data_cb(self, get_next_return):
+ self.location_mode, self._response = get_next_return
+ self.service_endpoint = self._response.service_endpoint
+ self.marker = self._response.next_marker
+ self.current_page = [self._build_item(item) for item in self._response.blobs]
+
+ return self._response.next_marker or None, self.current_page
+
+ @staticmethod
+ def _build_item(item):
+ if isinstance(item, FilterBlobItem):
+ blob = FilteredBlob(name=item.name, container_name=item.container_name) # pylint: disable=protected-access
+ return blob
+ return item
+
+
+class LeaseProperties(DictMixin):
+ """Blob Lease Properties.
+
+ :ivar str status:
+ The lease status of the blob. Possible values: locked|unlocked
+ :ivar str state:
+ Lease state of the blob. Possible values: available|leased|expired|breaking|broken
+ :ivar str duration:
+ When a blob is leased, specifies whether the lease is of infinite or fixed duration.
+ """
+
+ def __init__(self, **kwargs):
+ self.status = get_enum_value(kwargs.get('x-ms-lease-status'))
+ self.state = get_enum_value(kwargs.get('x-ms-lease-state'))
+ self.duration = get_enum_value(kwargs.get('x-ms-lease-duration'))
+
+ @classmethod
+ def _from_generated(cls, generated):
+ lease = cls()
+ lease.status = get_enum_value(generated.properties.lease_status)
+ lease.state = get_enum_value(generated.properties.lease_state)
+ lease.duration = get_enum_value(generated.properties.lease_duration)
+ return lease
+
+
+class ContentSettings(DictMixin):
+ """The content settings of a blob.
+
+ :param str content_type:
+ The content type specified for the blob. If no content type was
+ specified, the default content type is application/octet-stream.
+ :param str content_encoding:
+ If the content_encoding has previously been set
+ for the blob, that value is stored.
+ :param str content_language:
+ If the content_language has previously been set
+ for the blob, that value is stored.
+ :param str content_disposition:
+ content_disposition conveys additional information about how to
+ process the response payload, and also can be used to attach
+ additional metadata. If content_disposition has previously been set
+ for the blob, that value is stored.
+ :param str cache_control:
+ If the cache_control has previously been set for
+ the blob, that value is stored.
+ :param str content_md5:
+ If the content_md5 has been set for the blob, this response
+ header is stored so that the client can check for message content
+ integrity.
+ """
+
+ def __init__(
+ self, content_type=None, content_encoding=None,
+ content_language=None, content_disposition=None,
+ cache_control=None, content_md5=None, **kwargs):
+
+ self.content_type = content_type or kwargs.get('Content-Type')
+ self.content_encoding = content_encoding or kwargs.get('Content-Encoding')
+ self.content_language = content_language or kwargs.get('Content-Language')
+ self.content_md5 = content_md5 or kwargs.get('Content-MD5')
+ self.content_disposition = content_disposition or kwargs.get('Content-Disposition')
+ self.cache_control = cache_control or kwargs.get('Cache-Control')
+
+ @classmethod
+ def _from_generated(cls, generated):
+ settings = cls()
+ settings.content_type = generated.properties.content_type or None
+ settings.content_encoding = generated.properties.content_encoding or None
+ settings.content_language = generated.properties.content_language or None
+ settings.content_md5 = generated.properties.content_md5 or None
+ settings.content_disposition = generated.properties.content_disposition or None
+ settings.cache_control = generated.properties.cache_control or None
+ return settings
+
+
+class CopyProperties(DictMixin):
+ """Blob Copy Properties.
+
+ These properties will be `None` if this blob has never been the destination
+ in a Copy Blob operation, or if this blob has been modified after a concluded
+ Copy Blob operation, for example, using Set Blob Properties, Upload Blob, or Commit Block List.
+
+ :ivar str id:
+ String identifier for the last attempted Copy Blob operation where this blob
+ was the destination blob.
+ :ivar str source:
+ URL up to 2 KB in length that specifies the source blob used in the last attempted
+ Copy Blob operation where this blob was the destination blob.
+ :ivar str status:
+ State of the copy operation identified by Copy ID, with these values:
+ success:
+ Copy completed successfully.
+ pending:
+ Copy is in progress. Check copy_status_description if intermittent,
+ non-fatal errors impede copy progress but don't cause failure.
+ aborted:
+ Copy was ended by Abort Copy Blob.
+ failed:
+ Copy failed. See copy_status_description for failure details.
+ :ivar str progress:
+ Contains the number of bytes copied and the total bytes in the source in the last
+ attempted Copy Blob operation where this blob was the destination blob. Can show
+ between 0 and Content-Length bytes copied.
+ :ivar ~datetime.datetime completion_time:
+ Conclusion time of the last attempted Copy Blob operation where this blob was the
+ destination blob. This value can specify the time of a completed, aborted, or
+ failed copy attempt.
+ :ivar str status_description:
+ Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
+ or non-fatal copy operation failure.
+ :ivar bool incremental_copy:
+        Copies the snapshot of the source page blob to a destination page blob.
+        The snapshot is copied such that only the differential changes since the
+        previously copied snapshot are transferred to the destination.
+ :ivar ~datetime.datetime destination_snapshot:
+ Included if the blob is incremental copy blob or incremental copy snapshot,
+ if x-ms-copy-status is success. Snapshot time of the last successful
+ incremental copy snapshot for this blob.
+ """
+
+ def __init__(self, **kwargs):
+ self.id = kwargs.get('x-ms-copy-id')
+ self.source = kwargs.get('x-ms-copy-source')
+ self.status = get_enum_value(kwargs.get('x-ms-copy-status'))
+ self.progress = kwargs.get('x-ms-copy-progress')
+        self.completion_time = kwargs.get('x-ms-copy-completion-time')
+ self.status_description = kwargs.get('x-ms-copy-status-description')
+ self.incremental_copy = kwargs.get('x-ms-incremental-copy')
+ self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot')
+
+ @classmethod
+ def _from_generated(cls, generated):
+ copy = cls()
+ copy.id = generated.properties.copy_id or None
+ copy.status = get_enum_value(generated.properties.copy_status) or None
+ copy.source = generated.properties.copy_source or None
+ copy.progress = generated.properties.copy_progress or None
+ copy.completion_time = generated.properties.copy_completion_time or None
+ copy.status_description = generated.properties.copy_status_description or None
+ copy.incremental_copy = generated.properties.incremental_copy or None
+ copy.destination_snapshot = generated.properties.destination_snapshot or None
+ return copy
+
+
+class BlobBlock(DictMixin):
+ """BlockBlob Block class.
+
+ :param str block_id:
+ Block id.
+ :param str state:
+ Block state. Possible values: committed|uncommitted
+ :ivar int size:
+ Block size in bytes.
+ """
+
+ def __init__(self, block_id, state=BlockState.Latest):
+ self.id = block_id
+ self.state = state
+ self.size = None
+
+ @classmethod
+ def _from_generated(cls, generated):
+ block = cls(decode_base64_to_text(generated.name))
+ block.size = generated.size
+ return block
+
+
+class PageRange(DictMixin):
+ """Page Range for page blob.
+
+ :param int start:
+ Start of page range in bytes.
+ :param int end:
+ End of page range in bytes.
+ """
+
+ def __init__(self, start=None, end=None):
+ self.start = start
+ self.end = end
+
+
+class AccessPolicy(GenAccessPolicy):
+ """Access Policy class used by the set and get access policy methods in each service.
+
+ A stored access policy can specify the start time, expiry time, and
+ permissions for the Shared Access Signatures with which it's associated.
+ Depending on how you want to control access to your resource, you can
+ specify all of these parameters within the stored access policy, and omit
+ them from the URL for the Shared Access Signature. Doing so permits you to
+ modify the associated signature's behavior at any time, as well as to revoke
+ it. Or you can specify one or more of the access policy parameters within
+ the stored access policy, and the others on the URL. Finally, you can
+ specify all of the parameters on the URL. In this case, you can use the
+ stored access policy to revoke the signature, but not to modify its behavior.
+
+ Together the Shared Access Signature and the stored access policy must
+ include all fields required to authenticate the signature. If any required
+ fields are missing, the request will fail. Likewise, if a field is specified
+ both in the Shared Access Signature URL and in the stored access policy, the
+ request will fail with status code 400 (Bad Request).
+
+ :param permission:
+ The permissions associated with the shared access signature. The
+ user is restricted to operations allowed by the permissions.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has been
+ specified in an associated stored access policy.
+ :type permission: str or ~azure.storage.blob.ContainerSasPermissions
+ :param expiry:
+ The time at which the shared access signature becomes invalid.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has
+ been specified in an associated stored access policy. Azure will always
+ convert values to UTC. If a date is passed in without timezone info, it
+ is assumed to be UTC.
+ :type expiry: ~datetime.datetime or str
+ :param start:
+ The time at which the shared access signature becomes valid. If
+ omitted, start time for this call is assumed to be the time when the
+ storage service receives the request. Azure will always convert values
+ to UTC. If a date is passed in without timezone info, it is assumed to
+ be UTC.
+ :type start: ~datetime.datetime or str
+ """
+ def __init__(self, permission=None, expiry=None, start=None):
+ self.start = start
+ self.expiry = expiry
+ self.permission = permission
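For context, a stored access policy like the one above is attached to a container and then referenced by SAS tokens. A minimal sketch against the equivalent public `azure-storage-blob` package (the connection string and container name are placeholders; the vendored classes in this diff behave the same way):
```
from datetime import datetime, timedelta
from azure.storage.blob import AccessPolicy, ContainerClient, ContainerSasPermissions

container = ContainerClient.from_connection_string("<connection-string>", "mycontainer")

# One named (stored) access policy; a SAS can later reference it by this identifier,
# so expiry/permissions can be changed or revoked without reissuing the SAS.
policy = AccessPolicy(
    permission=ContainerSasPermissions(read=True, list=True),
    start=datetime.utcnow(),
    expiry=datetime.utcnow() + timedelta(hours=1))
container.set_container_access_policy(signed_identifiers={"read-only": policy})
```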
+
+
+class ContainerSasPermissions(object):
+ """ContainerSasPermissions class to be used with the
+ :func:`~azure.storage.blob.generate_container_sas` function and
+ for the AccessPolicies used with
+ :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`.
+
+ :param bool read:
+ Read the content, properties, metadata or block list of any blob in the
+ container. Use any blob in the container as the source of a copy operation.
+ :param bool write:
+ For any blob in the container, create or write content, properties,
+ metadata, or block list. Snapshot or lease the blob. Resize the blob
+ (page blob only). Use the blob as the destination of a copy operation
+ within the same account. Note: You cannot grant permissions to read or
+ write container properties or metadata, nor to lease a container, with
+ a container SAS. Use an account SAS instead.
+ :param bool delete:
+ Delete any blob in the container. Note: You cannot grant permissions to
+ delete a container with a container SAS. Use an account SAS instead.
+ :param bool delete_previous_version:
+ Delete the previous blob version for the versioning enabled storage account.
+ :param bool list:
+ List blobs in the container.
+ :param bool tag:
+ Set or get tags on the blobs in the container.
+ """
+ def __init__(self, read=False, write=False, delete=False, list=False, delete_previous_version=False, tag=False): # pylint: disable=redefined-builtin
+ self.read = read
+ self.write = write
+ self.delete = delete
+ self.list = list
+ self.delete_previous_version = delete_previous_version
+ self.tag = tag
+ self._str = (('r' if self.read else '') +
+ ('w' if self.write else '') +
+ ('d' if self.delete else '') +
+ ('x' if self.delete_previous_version else '') +
+ ('l' if self.list else '') +
+ ('t' if self.tag else ''))
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, permission):
+ """Create a ContainerSasPermissions from a string.
+
+ To specify read, write, delete, or list permissions you need only to
+ include the first letter of the word in the string. E.g. For read and
+ write permissions, you would provide a string "rw".
+
+ :param str permission: The string which dictates the read, write, delete,
+ and list permissions.
+ :return: A ContainerSasPermissions object
+ :rtype: ~azure.storage.blob.ContainerSasPermissions
+ """
+ p_read = 'r' in permission
+ p_write = 'w' in permission
+ p_delete = 'd' in permission
+ p_list = 'l' in permission
+ p_delete_previous_version = 'x' in permission
+ p_tag = 't' in permission
+ parsed = cls(read=p_read, write=p_write, delete=p_delete, list=p_list,
+ delete_previous_version=p_delete_previous_version, tag=p_tag)
+
+ return parsed
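A quick sanity check of the permission-string round trip implemented above (a minimal sketch, assuming the equivalent public `azure.storage.blob` package is importable; the vendored class is identical in behavior):
```
from azure.storage.blob import ContainerSasPermissions

# Flags are rendered in the fixed order r, w, d, x, l, t.
perms = ContainerSasPermissions(read=True, list=True, tag=True)
print(str(perms))                              # -> "rlt"

# from_string() only checks for the individual letters, so order does not matter.
parsed = ContainerSasPermissions.from_string("tlr")
print(parsed.read, parsed.list, parsed.tag)    # -> True True True
```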
+
+
+class BlobSasPermissions(object):
+ """BlobSasPermissions class to be used with the
+ :func:`~azure.storage.blob.generate_blob_sas` function.
+
+ :param bool read:
+ Read the content, properties, metadata and block list. Use the blob as
+ the source of a copy operation.
+ :param bool add:
+ Add a block to an append blob.
+ :param bool create:
+ Write a new blob, snapshot a blob, or copy a blob to a new blob.
+ :param bool write:
+ Create or write content, properties, metadata, or block list. Snapshot
+ or lease the blob. Resize the blob (page blob only). Use the blob as the
+ destination of a copy operation within the same account.
+ :param bool delete:
+ Delete the blob.
+ :param bool delete_previous_version:
+ Delete the previous blob version for the versioning enabled storage account.
+ :param bool tag:
+ Set or get tags on the blob.
+ """
+ def __init__(self, read=False, add=False, create=False, write=False,
+ delete=False, delete_previous_version=False, tag=True):
+ self.read = read
+ self.add = add
+ self.create = create
+ self.write = write
+ self.delete = delete
+ self.delete_previous_version = delete_previous_version
+ self.tag = tag
+ self._str = (('r' if self.read else '') +
+ ('a' if self.add else '') +
+ ('c' if self.create else '') +
+ ('w' if self.write else '') +
+ ('d' if self.delete else '') +
+ ('x' if self.delete_previous_version else '') +
+ ('t' if self.tag else ''))
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, permission):
+ """Create a BlobSasPermissions from a string.
+
+ To specify read, add, create, write, or delete permissions you need only to
+ include the first letter of the word in the string. E.g. For read and
+ write permissions, you would provide a string "rw".
+
+ :param str permission: The string which dictates the read, add, create,
+ write, or delete permissions.
+ :return: A BlobSasPermissions object
+ :rtype: ~azure.storage.blob.BlobSasPermissions
+ """
+ p_read = 'r' in permission
+ p_add = 'a' in permission
+ p_create = 'c' in permission
+ p_write = 'w' in permission
+ p_delete = 'd' in permission
+ p_delete_previous_version = 'x' in permission
+ p_tag = 't' in permission
+
+ parsed = cls(read=p_read, add=p_add, create=p_create, write=p_write, delete=p_delete,
+ delete_previous_version=p_delete_previous_version, tag=p_tag)
+
+ return parsed
+
+
+class CustomerProvidedEncryptionKey(object):
+ """
+ All data in Azure Storage is encrypted at-rest using an account-level encryption key.
+ In versions 2018-06-17 and newer, you can manage the key used to encrypt blob contents
+ and application metadata per-blob by providing an AES-256 encryption key in requests to the storage service.
+
+ When you use a customer-provided key, Azure Storage does not manage or persist your key.
+ When writing data to a blob, the provided key is used to encrypt your data before writing it to disk.
+ A SHA-256 hash of the encryption key is written alongside the blob contents,
+ and is used to verify that all subsequent operations against the blob use the same encryption key.
+ This hash cannot be used to retrieve the encryption key or decrypt the contents of the blob.
+ When reading a blob, the provided key is used to decrypt your data after reading it from disk.
+ In both cases, the provided encryption key is securely discarded
+ as soon as the encryption or decryption process completes.
+
+ :param str key_value:
+ Base64-encoded AES-256 encryption key value.
+ :param str key_hash:
+ Base64-encoded SHA256 of the encryption key.
+ :ivar str algorithm:
+ Specifies the algorithm to use when encrypting data using the given key. Must be AES256.
+ """
+ def __init__(self, key_value, key_hash):
+ self.key_value = key_value
+ self.key_hash = key_hash
+ self.algorithm = 'AES256'
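To illustrate the relationship between `key_value` and `key_hash` documented above, here is a minimal sketch that derives both from a locally generated AES-256 key (the key bytes are random and purely illustrative; only the base64 and SHA-256 steps are the point, and the public `azure.storage.blob` class is assumed to be importable):
```
import base64
import hashlib
import os

from azure.storage.blob import CustomerProvidedEncryptionKey

raw_key = os.urandom(32)  # 32 random bytes = a 256-bit AES key

key_value = base64.b64encode(raw_key).decode('utf-8')                           # base64 of the key
key_hash = base64.b64encode(hashlib.sha256(raw_key).digest()).decode('utf-8')   # base64 of SHA-256(key)

cpk = CustomerProvidedEncryptionKey(key_value=key_value, key_hash=key_hash)
print(cpk.algorithm)  # -> AES256
```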
+
+
+class ContainerEncryptionScope(object):
+ """The default encryption scope configuration for a container.
+
+ This scope is used implicitly for all future writes within the container,
+ but can be overridden per blob operation.
+
+ .. versionadded:: 12.2.0
+
+ :param str default_encryption_scope:
+ Specifies the default encryption scope to set on the container and use for
+ all future writes.
+ :param bool prevent_encryption_scope_override:
+ If true, prevents any request from specifying a different encryption scope than the scope
+ set on the container. Default value is false.
+ """
+
+ def __init__(self, default_encryption_scope, **kwargs):
+ self.default_encryption_scope = default_encryption_scope
+ self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', False)
+
+ @classmethod
+ def _from_generated(cls, generated):
+ if generated.properties.default_encryption_scope:
+ scope = cls(
+ generated.properties.default_encryption_scope,
+ prevent_encryption_scope_override=generated.properties.prevent_encryption_scope_override or False
+ )
+ return scope
+ return None
+
+
+class DelimitedJsonDialect(object):
+ """Defines the input or output JSON serialization for a blob data query.
+
+ :keyword str delimiter: The line separator character, default value is '\n'
+ """
+
+ def __init__(self, **kwargs):
+ self.delimiter = kwargs.pop('delimiter', '\n')
+
+
+class DelimitedTextDialect(object):
+ """Defines the input or output delimited (CSV) serialization for a blob query request.
+
+ :keyword str delimiter:
+ Column separator, defaults to ','.
+ :keyword str quotechar:
+ Field quote, defaults to '"'.
+ :keyword str lineterminator:
+ Record separator, defaults to '\n'.
+ :keyword str escapechar:
+ Escape char, defaults to empty.
+ :keyword bool has_header:
+ Whether the blob data includes headers in the first line. The default value is False, meaning that the
+ data will be returned inclusive of the first line. If set to True, the data will be returned exclusive
+ of the first line.
+ """
+ def __init__(self, **kwargs):
+ self.delimiter = kwargs.pop('delimiter', ',')
+ self.quotechar = kwargs.pop('quotechar', '"')
+ self.lineterminator = kwargs.pop('lineterminator', '\n')
+ self.escapechar = kwargs.pop('escapechar', "")
+ self.has_header = kwargs.pop('has_header', False)
+
+
+class ArrowDialect(ArrowField):
+    """Field of an Arrow schema.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param ~azure.storage.blob.ArrowType type: Arrow field type.
+ :keyword str name: The name of the field.
+ :keyword int precision: The precision of the field.
+ :keyword int scale: The scale of the field.
+ """
+ def __init__(self, type, **kwargs): # pylint: disable=redefined-builtin
+ super(ArrowDialect, self).__init__(type=type, **kwargs)
+
+
+class ArrowType(str, Enum):
+
+ INT64 = "int64"
+ BOOL = "bool"
+ TIMESTAMP_MS = "timestamp[ms]"
+ STRING = "string"
+ DOUBLE = "double"
+ DECIMAL = 'decimal'
+
+
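`serialize_query_format` (added later in this diff, in `_serialize.py`) accepts a list of these fields as the Arrow output schema of a quick query. A minimal sketch of building such a schema, assuming the equivalent public `azure.storage.blob` types:
```
from azure.storage.blob import ArrowDialect, ArrowType

# One ArrowDialect per output column; DECIMAL additionally takes precision/scale.
arrow_schema = [
    ArrowDialect(ArrowType.STRING, name="name"),
    ArrowDialect(ArrowType.INT64, name="quantity"),
    ArrowDialect(ArrowType.DECIMAL, name="price", precision=10, scale=2),
]
# Passing this list as the output format routes it through the
# QueryFormatType.arrow branch of serialize_query_format.
```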
+class ObjectReplicationPolicy(DictMixin):
+ """Policy id and rule ids applied to a blob.
+
+ :ivar str policy_id:
+ Policy id for the blob. A replication policy gets created (policy id) when creating a source/destination pair.
+ :ivar list(~azure.storage.blob.ObjectReplicationRule) rules:
+ Within each policy there may be multiple replication rules.
+ e.g. rule 1= src/container/.pdf to dst/container2/; rule2 = src/container1/.jpg to dst/container3
+ """
+
+ def __init__(self, **kwargs):
+ self.policy_id = kwargs.pop('policy_id', None)
+ self.rules = kwargs.pop('rules', None)
+
+
+class ObjectReplicationRule(DictMixin):
+    """A single rule of an object replication policy and its replication status for this blob.
+
+ :ivar str rule_id:
+ Rule id.
+ :ivar str status:
+ The status of the rule. It could be "Complete" or "Failed"
+ """
+
+ def __init__(self, **kwargs):
+ self.rule_id = kwargs.pop('rule_id', None)
+ self.status = kwargs.pop('status', None)
+
+
+class BlobQueryError(object):
+    """An error that occurred during a quick query operation.
+
+ :ivar str error:
+ The name of the error.
+ :ivar bool is_fatal:
+ If true, this error prevents further query processing. More result data may be returned,
+ but there is no guarantee that all of the original data will be processed.
+ If false, this error does not prevent further query processing.
+ :ivar str description:
+ A description of the error.
+ :ivar int position:
+ The blob offset at which the error occurred.
+ """
+ def __init__(self, error=None, is_fatal=False, description=None, position=None):
+ self.error = error
+ self.is_fatal = is_fatal
+ self.description = description
+ self.position = position
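The `BlobQueryError` objects above surface through the `on_error` callback of a quick query, and the two dialect classes describe the input and output serialization. A hedged usage sketch against the public `azure-storage-blob` client (connection string, container and blob names are placeholders):
```
from azure.storage.blob import BlobClient, DelimitedJsonDialect, DelimitedTextDialect

def on_error(error):
    # error is a BlobQueryError; non-fatal errors still allow further data to arrive.
    print("{} (fatal={}) at offset {}: {}".format(
        error.error, error.is_fatal, error.position, error.description))

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="mycontainer", blob_name="data.csv")

reader = blob.query_blob(
    "SELECT * from BlobStorage",
    blob_format=DelimitedTextDialect(delimiter=',', quotechar='"', has_header=True),
    output_format=DelimitedJsonDialect(delimiter='\n'),
    on_error=on_error)
print(reader.readall())
```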
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_quick_query_helper.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_quick_query_helper.py
new file mode 100644
index 00000000000..eb51d987b9a
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_quick_query_helper.py
@@ -0,0 +1,196 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from io import BytesIO
+from typing import Union, Iterable, IO # pylint: disable=unused-import
+
+from ._shared.avro.datafile import DataFileReader
+from ._shared.avro.avro_io import DatumReader
+
+
+class BlobQueryReader(object): # pylint: disable=too-many-instance-attributes
+ """A streaming object to read query results.
+
+ :ivar str name:
+        The name of the blob being queried.
+ :ivar str container:
+ The name of the container where the blob is.
+ :ivar dict response_headers:
+ The response_headers of the quick query request.
+ :ivar bytes record_delimiter:
+        The delimiter used to separate lines, or records, within the data. The `records`
+ method will return these lines via a generator.
+ """
+
+ def __init__(
+ self,
+ name=None,
+ container=None,
+ errors=None,
+ record_delimiter='\n',
+ encoding=None,
+ headers=None,
+ response=None,
+ error_cls=None,
+ ):
+ self.name = name
+ self.container = container
+ self.response_headers = headers
+ self.record_delimiter = record_delimiter
+ self._size = 0
+ self._bytes_processed = 0
+ self._errors = errors
+ self._encoding = encoding
+ self._parsed_results = DataFileReader(QuickQueryStreamer(response), DatumReader())
+ self._first_result = self._process_record(next(self._parsed_results))
+ self._error_cls = error_cls
+
+ def __len__(self):
+ return self._size
+
+ def _process_record(self, result):
+ self._size = result.get('totalBytes', self._size)
+ self._bytes_processed = result.get('bytesScanned', self._bytes_processed)
+ if 'data' in result:
+ return result.get('data')
+ if 'fatal' in result:
+ error = self._error_cls(
+ error=result['name'],
+ is_fatal=result['fatal'],
+ description=result['description'],
+ position=result['position']
+ )
+ if self._errors:
+ self._errors(error)
+ return None
+
+ def _iter_stream(self):
+ if self._first_result is not None:
+ yield self._first_result
+ for next_result in self._parsed_results:
+ processed_result = self._process_record(next_result)
+ if processed_result is not None:
+ yield processed_result
+
+ def readall(self):
+ # type: () -> Union[bytes, str]
+ """Return all query results.
+
+ This operation is blocking until all data is downloaded.
+        If an encoding has been configured, it will be used to decode individual
+        records as they are received.
+
+ :rtype: Union[bytes, str]
+ """
+ stream = BytesIO()
+ self.readinto(stream)
+ data = stream.getvalue()
+ if self._encoding:
+ return data.decode(self._encoding)
+ return data
+
+ def readinto(self, stream):
+ # type: (IO) -> None
+ """Download the query result to a stream.
+
+ :param stream:
+ The stream to download to. This can be an open file-handle,
+ or any writable stream.
+ :returns: None
+ """
+ for record in self._iter_stream():
+ stream.write(record)
+
+ def records(self):
+ # type: () -> Iterable[Union[bytes, str]]
+ """Returns a record generator for the query result.
+
+ Records will be returned line by line.
+        If an encoding has been configured, it will be used to decode individual
+        records as they are received.
+
+ :rtype: Iterable[Union[bytes, str]]
+ """
+ delimiter = self.record_delimiter.encode('utf-8')
+ for record_chunk in self._iter_stream():
+ for record in record_chunk.split(delimiter):
+ if self._encoding:
+ yield record.decode(self._encoding)
+ else:
+ yield record
+
+
+
+class QuickQueryStreamer(object):
+ """
+ File-like streaming iterator.
+ """
+
+ def __init__(self, generator):
+ self.generator = generator
+ self.iterator = iter(generator)
+ self._buf = b""
+ self._point = 0
+ self._download_offset = 0
+ self._buf_start = 0
+ self.file_length = None
+
+ def __len__(self):
+ return self.file_length
+
+ def __iter__(self):
+ return self.iterator
+
+ @staticmethod
+ def seekable():
+ return True
+
+ def __next__(self):
+ next_part = next(self.iterator)
+ self._download_offset += len(next_part)
+ return next_part
+
+ next = __next__ # Python 2 compatibility.
+
+ def tell(self):
+ return self._point
+
+ def seek(self, offset, whence=0):
+ if whence == 0:
+ self._point = offset
+ elif whence == 1:
+ self._point += offset
+ else:
+            raise ValueError("whence must be 0 or 1")
+ if self._point < 0:
+ self._point = 0 # XXX is this right?
+
+ def read(self, size):
+ try:
+ # keep reading from the generator until the buffer of this stream has enough data to read
+ while self._point + size > self._download_offset:
+ self._buf += self.__next__()
+ except StopIteration:
+ self.file_length = self._download_offset
+
+ start_point = self._point
+
+ # EOF
+ self._point = min(self._point + size, self._download_offset)
+
+ relative_start = start_point - self._buf_start
+ if relative_start < 0:
+ raise ValueError("Buffer has dumped too much data")
+ relative_end = relative_start + size
+ data = self._buf[relative_start: relative_end]
+
+ # dump the extra data in buffer
+ # buffer start--------------------16bytes----current read position
+ dumped_size = max(relative_end - 16 - relative_start, 0)
+ self._buf_start += dumped_size
+ self._buf = self._buf[dumped_size:]
+
+ return data
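A minimal sketch of the file-like behavior `QuickQueryStreamer` layers over a chunked HTTP response, using a plain byte generator in place of the response (the import path assumes the vendored package layout added by this diff is installed):
```
from azext_storage_preview.vendored_sdks.blob._quick_query_helper import QuickQueryStreamer

# Stand-in for the streamed query response: an iterator of byte chunks.
streamer = QuickQueryStreamer(iter([b"hello", b"world"]))

print(streamer.read(4))   # -> b'hell'   (pulls the first chunk into the buffer)
print(streamer.read(6))   # -> b'oworld' (pulls the second chunk on demand)
print(streamer.tell())    # -> 10, the current read position
```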
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_serialize.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_serialize.py
new file mode 100644
index 00000000000..a4b13dad938
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_serialize.py
@@ -0,0 +1,196 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+try:
+ from urllib.parse import quote
+except ImportError:
+ from urllib2 import quote # type: ignore
+
+from azure.core import MatchConditions
+
+from ._models import (
+ ContainerEncryptionScope,
+ DelimitedJsonDialect)
+from ._generated.models import (
+ ModifiedAccessConditions,
+ SourceModifiedAccessConditions,
+ CpkScopeInfo,
+ ContainerCpkScopeInfo,
+ QueryFormat,
+ QuerySerialization,
+ DelimitedTextConfiguration,
+ JsonTextConfiguration,
+ ArrowConfiguration,
+ QueryFormatType,
+ BlobTag,
+ BlobTags, LeaseAccessConditions
+)
+
+
+_SUPPORTED_API_VERSIONS = [
+ '2019-02-02',
+ '2019-07-07',
+ '2019-10-10',
+ '2019-12-12',
+ '2020-02-10',
+]
+
+
+def _get_match_headers(kwargs, match_param, etag_param):
+    # type: (Dict[str, Any], str, str) -> Tuple[Optional[str], Optional[str]]
+ if_match = None
+ if_none_match = None
+ match_condition = kwargs.pop(match_param, None)
+ if match_condition == MatchConditions.IfNotModified:
+ if_match = kwargs.pop(etag_param, None)
+ if not if_match:
+ raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
+ elif match_condition == MatchConditions.IfPresent:
+ if_match = '*'
+ elif match_condition == MatchConditions.IfModified:
+ if_none_match = kwargs.pop(etag_param, None)
+ if not if_none_match:
+ raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
+ elif match_condition == MatchConditions.IfMissing:
+ if_none_match = '*'
+ elif match_condition is None:
+ if kwargs.get(etag_param):
+ raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param))
+ else:
+ raise TypeError("Invalid match condition: {}".format(match_condition))
+ return if_match, if_none_match
+
+
+def get_access_conditions(lease):
+ # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None]
+ try:
+ lease_id = lease.id # type: ignore
+ except AttributeError:
+ lease_id = lease # type: ignore
+ return LeaseAccessConditions(lease_id=lease_id) if lease_id else None
+
+
+def get_modify_conditions(kwargs):
+ # type: (Dict[str, Any]) -> ModifiedAccessConditions
+ if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag')
+ return ModifiedAccessConditions(
+ if_modified_since=kwargs.pop('if_modified_since', None),
+ if_unmodified_since=kwargs.pop('if_unmodified_since', None),
+ if_match=if_match or kwargs.pop('if_match', None),
+ if_none_match=if_none_match or kwargs.pop('if_none_match', None),
+ if_tags=kwargs.pop('if_tags_match_condition', None)
+ )
+
+
+def get_source_conditions(kwargs):
+ # type: (Dict[str, Any]) -> SourceModifiedAccessConditions
+ if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag')
+ return SourceModifiedAccessConditions(
+ source_if_modified_since=kwargs.pop('source_if_modified_since', None),
+ source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None),
+ source_if_match=if_match or kwargs.pop('source_if_match', None),
+ source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None),
+ source_if_tags=kwargs.pop('source_if_tags_match_condition', None)
+ )
+
+
+def get_cpk_scope_info(kwargs):
+ # type: (Dict[str, Any]) -> CpkScopeInfo
+ if 'encryption_scope' in kwargs:
+ return CpkScopeInfo(encryption_scope=kwargs.pop('encryption_scope'))
+ return None
+
+
+def get_container_cpk_scope_info(kwargs):
+ # type: (Dict[str, Any]) -> ContainerCpkScopeInfo
+ encryption_scope = kwargs.pop('container_encryption_scope', None)
+ if encryption_scope:
+ if isinstance(encryption_scope, ContainerEncryptionScope):
+ return ContainerCpkScopeInfo(
+ default_encryption_scope=encryption_scope.default_encryption_scope,
+ prevent_encryption_scope_override=encryption_scope.prevent_encryption_scope_override
+ )
+ if isinstance(encryption_scope, dict):
+ return ContainerCpkScopeInfo(
+ default_encryption_scope=encryption_scope['default_encryption_scope'],
+ prevent_encryption_scope_override=encryption_scope.get('prevent_encryption_scope_override')
+ )
+ raise TypeError("Container encryption scope must be dict or type ContainerEncryptionScope.")
+ return None
+
+
+def get_api_version(kwargs, default):
+    # type: (Dict[str, Any], str) -> str
+ api_version = kwargs.pop('api_version', None)
+ if api_version and api_version not in _SUPPORTED_API_VERSIONS:
+ versions = '\n'.join(_SUPPORTED_API_VERSIONS)
+ raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions))
+ return api_version or default
+
+
+def serialize_blob_tags_header(tags=None):
+ # type: (Optional[Dict[str, str]]) -> str
+ if tags is None:
+ return None
+
+ components = list()
+ if tags:
+ for key, value in tags.items():
+ components.append(quote(key, safe='.-'))
+ components.append('=')
+ components.append(quote(value, safe='.-'))
+ components.append('&')
+
+ if components:
+ del components[-1]
+
+ return ''.join(components)
+
+
+def serialize_blob_tags(tags=None):
+ # type: (Optional[Dict[str, str]]) -> Union[BlobTags, None]
+ tag_list = list()
+ if tags:
+ tag_list = [BlobTag(key=k, value=v) for k, v in tags.items()]
+ return BlobTags(blob_tag_set=tag_list)
+
+
+def serialize_query_format(formater):
+ if isinstance(formater, DelimitedJsonDialect):
+ serialization_settings = JsonTextConfiguration(
+ record_separator=formater.delimiter
+ )
+ qq_format = QueryFormat(
+ type=QueryFormatType.json,
+ json_text_configuration=serialization_settings)
+ elif hasattr(formater, 'quotechar'): # This supports a csv.Dialect as well
+ try:
+ headers = formater.has_header
+ except AttributeError:
+ headers = False
+ serialization_settings = DelimitedTextConfiguration(
+ column_separator=formater.delimiter,
+ field_quote=formater.quotechar,
+ record_separator=formater.lineterminator,
+ escape_char=formater.escapechar,
+ headers_present=headers
+ )
+ qq_format = QueryFormat(
+ type=QueryFormatType.delimited,
+ delimited_text_configuration=serialization_settings
+ )
+ elif isinstance(formater, list):
+ serialization_settings = ArrowConfiguration(
+ schema=formater
+ )
+ qq_format = QueryFormat(
+ type=QueryFormatType.arrow,
+ arrow_configuration=serialization_settings)
+ elif not formater:
+ return None
+ else:
+        raise TypeError("Format must be DelimitedTextDialect, DelimitedJsonDialect or a list of ArrowDialect.")
+ return QuerySerialization(format=qq_format)
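For reference, `serialize_blob_tags_header` above produces the query-string style value sent in the `x-ms-tags` header. A small sketch (import path assumes the vendored layout in this diff; the tag values are arbitrary):
```
from azext_storage_preview.vendored_sdks.blob._serialize import serialize_blob_tags_header

tags = {"project": "storage preview", "status": "active"}
# Keys and values are URL-quoted (only '.' and '-' are left unescaped),
# then joined with '=' and '&'.
print(serialize_blob_tags_header(tags))
# -> project=storage%20preview&status=active
print(serialize_blob_tags_header(None))   # -> None
```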
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/__init__.py
new file mode 100644
index 00000000000..160f8822382
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/__init__.py
@@ -0,0 +1,56 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import hmac
+
+try:
+ from urllib.parse import quote, unquote
+except ImportError:
+ from urllib2 import quote, unquote # type: ignore
+
+import six
+
+
+def url_quote(url):
+ return quote(url)
+
+
+def url_unquote(url):
+ return unquote(url)
+
+
+def encode_base64(data):
+ if isinstance(data, six.text_type):
+ data = data.encode('utf-8')
+ encoded = base64.b64encode(data)
+ return encoded.decode('utf-8')
+
+
+def decode_base64_to_bytes(data):
+ if isinstance(data, six.text_type):
+ data = data.encode('utf-8')
+ return base64.b64decode(data)
+
+
+def decode_base64_to_text(data):
+ decoded_bytes = decode_base64_to_bytes(data)
+ return decoded_bytes.decode('utf-8')
+
+
+def sign_string(key, string_to_sign, key_is_base64=True):
+ if key_is_base64:
+ key = decode_base64_to_bytes(key)
+ else:
+ if isinstance(key, six.text_type):
+ key = key.encode('utf-8')
+ if isinstance(string_to_sign, six.text_type):
+ string_to_sign = string_to_sign.encode('utf-8')
+ signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
+ digest = signed_hmac_sha256.digest()
+ encoded_digest = encode_base64(digest)
+ return encoded_digest
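The helpers above implement the HMAC-SHA256 signing used by shared key authentication. A minimal sketch with a throwaway base64 key (not a real account key; import path assumes the vendored layout in this diff):
```
import base64

from azext_storage_preview.vendored_sdks.blob._shared import decode_base64_to_text, sign_string

# A made-up, base64-encoded key standing in for a storage account key.
fake_key = base64.b64encode(b"not-a-real-account-key").decode('utf-8')

signature = sign_string(fake_key, "GET\n\n...canonicalized string to sign...")
print(signature)  # base64-encoded HMAC-SHA256 digest, as used in the Authorization header

print(decode_base64_to_text(base64.b64encode(b"hello")))  # -> hello
```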
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/authentication.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/authentication.py
new file mode 100644
index 00000000000..d04c1e4fb53
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/authentication.py
@@ -0,0 +1,142 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import logging
+import sys
+
+try:
+ from urllib.parse import urlparse, unquote
+except ImportError:
+ from urlparse import urlparse # type: ignore
+ from urllib2 import unquote # type: ignore
+
+try:
+ from yarl import URL
+except ImportError:
+ pass
+
+try:
+ from azure.core.pipeline.transport import AioHttpTransport
+except ImportError:
+ AioHttpTransport = None
+
+from azure.core.exceptions import ClientAuthenticationError
+from azure.core.pipeline.policies import SansIOHTTPPolicy
+
+from . import sign_string
+
+
+logger = logging.getLogger(__name__)
+
+
+
+# wraps a given exception with the desired exception type
+def _wrap_exception(ex, desired_type):
+ msg = ""
+ if ex.args:
+ msg = ex.args[0]
+ if sys.version_info >= (3,):
+ # Automatic chaining in Python 3 means we keep the trace
+ return desired_type(msg)
+    # There isn't a good general way in Python 2 to keep the stack trace
+    # that will not, in turn, raise an error in Python 3.
+    # However, we can keep the previous error type and message
+ # TODO: In the future we will log the trace
+ return desired_type('{}: {}'.format(ex.__class__.__name__, msg))
+
+
+class AzureSigningError(ClientAuthenticationError):
+ """
+ Represents a fatal error when attempting to sign a request.
+ In general, the cause of this exception is user error. For example, the given account key is not valid.
+ Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info.
+ """
+
+
+# pylint: disable=no-self-use
+class SharedKeyCredentialPolicy(SansIOHTTPPolicy):
+
+ def __init__(self, account_name, account_key):
+ self.account_name = account_name
+ self.account_key = account_key
+ super(SharedKeyCredentialPolicy, self).__init__()
+
+ @staticmethod
+ def _get_headers(request, headers_to_sign):
+ headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value)
+ if 'content-length' in headers and headers['content-length'] == '0':
+ del headers['content-length']
+ return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
+
+ @staticmethod
+ def _get_verb(request):
+ return request.http_request.method + '\n'
+
+ def _get_canonicalized_resource(self, request):
+ uri_path = urlparse(request.http_request.url).path
+ try:
+ if isinstance(request.context.transport, AioHttpTransport) or \
+ isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \
+ isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None),
+ AioHttpTransport):
+ uri_path = URL(uri_path)
+ return '/' + self.account_name + str(uri_path)
+ except TypeError:
+ pass
+ return '/' + self.account_name + uri_path
+
+ @staticmethod
+ def _get_canonicalized_headers(request):
+ string_to_sign = ''
+ x_ms_headers = []
+ for name, value in request.http_request.headers.items():
+ if name.startswith('x-ms-'):
+ x_ms_headers.append((name.lower(), value))
+ x_ms_headers.sort()
+ for name, value in x_ms_headers:
+ if value is not None:
+ string_to_sign += ''.join([name, ':', value, '\n'])
+ return string_to_sign
+
+ @staticmethod
+ def _get_canonicalized_resource_query(request):
+ sorted_queries = list(request.http_request.query.items())
+ sorted_queries.sort()
+
+ string_to_sign = ''
+ for name, value in sorted_queries:
+ if value is not None:
+ string_to_sign += '\n' + name.lower() + ':' + unquote(value)
+
+ return string_to_sign
+
+ def _add_authorization_header(self, request, string_to_sign):
+ try:
+ signature = sign_string(self.account_key, string_to_sign)
+ auth_string = 'SharedKey ' + self.account_name + ':' + signature
+ request.http_request.headers['Authorization'] = auth_string
+ except Exception as ex:
+ # Wrap any error that occurred as signing error
+ # Doing so will clarify/locate the source of problem
+ raise _wrap_exception(ex, AzureSigningError)
+
+ def on_request(self, request):
+ string_to_sign = \
+ self._get_verb(request) + \
+ self._get_headers(
+ request,
+ [
+ 'content-encoding', 'content-language', 'content-length',
+ 'content-md5', 'content-type', 'date', 'if-modified-since',
+ 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
+ ]
+ ) + \
+ self._get_canonicalized_headers(request) + \
+ self._get_canonicalized_resource(request) + \
+ self._get_canonicalized_resource_query(request)
+
+ self._add_authorization_header(request, string_to_sign)
+ #logger.debug("String_to_sign=%s", string_to_sign)
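A small sketch of the header canonicalization step in `SharedKeyCredentialPolicy`; the fake request object below only mimics the attributes the static helper touches (import path assumes the vendored layout in this diff):
```
from types import SimpleNamespace

from azext_storage_preview.vendored_sdks.blob._shared.authentication import SharedKeyCredentialPolicy

fake_request = SimpleNamespace(http_request=SimpleNamespace(headers={
    "x-ms-version": "2020-02-10",
    "x-ms-date": "Thu, 20 May 2021 00:00:00 GMT",
    "Content-Type": "application/octet-stream",   # ignored: only x-ms-* headers are canonicalized
}))

# Lower-cased x-ms-* headers, sorted by name, each terminated with '\n'.
print(SharedKeyCredentialPolicy._get_canonicalized_headers(fake_request))
# x-ms-date:Thu, 20 May 2021 00:00:00 GMT
# x-ms-version:2020-02-10
```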
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/__init__.py
new file mode 100644
index 00000000000..5b396cd202e
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/__init__.py
@@ -0,0 +1,5 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/avro_io.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/avro_io.py
new file mode 100644
index 00000000000..93a5c134849
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/avro_io.py
@@ -0,0 +1,464 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+"""Input/output utilities.
+
+Includes:
+ - i/o-specific constants
+ - i/o-specific exceptions
+ - schema validation
+ - leaf value encoding and decoding
+ - datum reader/writer stuff (?)
+
+Also includes a generic representation for data, which uses the
+following mapping:
+ - Schema records are implemented as dict.
+ - Schema arrays are implemented as list.
+ - Schema maps are implemented as dict.
+ - Schema strings are implemented as unicode.
+ - Schema bytes are implemented as str.
+ - Schema ints are implemented as int.
+ - Schema longs are implemented as long.
+ - Schema floats are implemented as float.
+ - Schema doubles are implemented as float.
+ - Schema booleans are implemented as bool.
+"""
+
+import json
+import logging
+import struct
+import sys
+
+from ..avro import schema
+
+PY3 = sys.version_info[0] == 3
+
+logger = logging.getLogger(__name__)
+
+# ------------------------------------------------------------------------------
+# Constants
+
+STRUCT_FLOAT = struct.Struct('<f')            # little-endian float
+STRUCT_DOUBLE = struct.Struct('<d')           # little-endian double
+
+
+# ------------------------------------------------------------------------------
+# Exceptions
+
+
+class SchemaResolutionException(schema.AvroException):
+    def __init__(self, fail_msg, writer_schema=None):
+        if writer_schema:
+            pretty_writers = json.dumps(json.loads(str(writer_schema)), indent=2)
+            fail_msg += "\nWriter's Schema: %s" % pretty_writers
+        schema.AvroException.__init__(self, fail_msg)
+
+
+# ------------------------------------------------------------------------------
+# Decoder
+
+
+class BinaryDecoder(object):
+    """Read leaf values."""
+
+    def __init__(self, reader):
+        """
+        reader is a Python object on which we can call read, seek, and tell.
+        """
+        self._reader = reader
+
+    @property
+    def reader(self):
+        """Reports the reader used by this decoder."""
+        return self._reader
+
+    def read(self, n):
+        """Read n bytes.
+
+        Args:
+            n: Number of bytes to read.
+        Returns:
+            The next n bytes from the input.
+        """
+        assert (n >= 0), n
+ input_bytes = self.reader.read(n)
+ if n > 0 and not input_bytes:
+ raise StopIteration
+ assert (len(input_bytes) == n), input_bytes
+ return input_bytes
+
+ @staticmethod
+ def read_null():
+ """
+ null is written as zero bytes
+ """
+ return None
+
+ def read_boolean(self):
+ """
+ a boolean is written as a single byte
+ whose value is either 0 (false) or 1 (true).
+ """
+ b = ord(self.read(1))
+ if b == 1:
+ return True
+ if b == 0:
+ return False
+ fail_msg = "Invalid value for boolean: %s" % b
+ raise schema.AvroException(fail_msg)
+
+ def read_int(self):
+ """
+ int and long values are written using variable-length, zig-zag coding.
+ """
+ return self.read_long()
+
+ def read_long(self):
+ """
+ int and long values are written using variable-length, zig-zag coding.
+ """
+ b = ord(self.read(1))
+ n = b & 0x7F
+ shift = 7
+ while (b & 0x80) != 0:
+ b = ord(self.read(1))
+ n |= (b & 0x7F) << shift
+ shift += 7
+ datum = (n >> 1) ^ -(n & 1)
+ return datum
+
+ def read_float(self):
+ """
+ A float is written as 4 bytes.
+ The float is converted into a 32-bit integer using a method equivalent to
+ Java's floatToIntBits and then encoded in little-endian format.
+ """
+ return STRUCT_FLOAT.unpack(self.read(4))[0]
+
+ def read_double(self):
+ """
+ A double is written as 8 bytes.
+ The double is converted into a 64-bit integer using a method equivalent to
+ Java's doubleToLongBits and then encoded in little-endian format.
+ """
+ return STRUCT_DOUBLE.unpack(self.read(8))[0]
+
+ def read_bytes(self):
+ """
+ Bytes are encoded as a long followed by that many bytes of data.
+ """
+ nbytes = self.read_long()
+ assert (nbytes >= 0), nbytes
+ return self.read(nbytes)
+
+ def read_utf8(self):
+ """
+ A string is encoded as a long followed by
+ that many bytes of UTF-8 encoded character data.
+ """
+ input_bytes = self.read_bytes()
+ if PY3:
+ try:
+ return input_bytes.decode('utf-8')
+ except UnicodeDecodeError as exn:
+ logger.error('Invalid UTF-8 input bytes: %r', input_bytes)
+ raise exn
+ else:
+ # PY2
+ return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable
+
+ def skip_null(self):
+ pass
+
+ def skip_boolean(self):
+ self.skip(1)
+
+ def skip_int(self):
+ self.skip_long()
+
+ def skip_long(self):
+ b = ord(self.read(1))
+ while (b & 0x80) != 0:
+ b = ord(self.read(1))
+
+ def skip_float(self):
+ self.skip(4)
+
+ def skip_double(self):
+ self.skip(8)
+
+ def skip_bytes(self):
+ self.skip(self.read_long())
+
+ def skip_utf8(self):
+ self.skip_bytes()
+
+ def skip(self, n):
+ self.reader.seek(self.reader.tell() + n)
+
+
+# ------------------------------------------------------------------------------
+# DatumReader
+
+
+class DatumReader(object):
+ """Deserialize Avro-encoded data into a Python data structure."""
+
+ def __init__(self, writer_schema=None):
+ """
+ As defined in the Avro specification, we call the schema encoded
+ in the data the "writer's schema".
+ """
+ self._writer_schema = writer_schema
+
+ # read/write properties
+ def set_writer_schema(self, writer_schema):
+ self._writer_schema = writer_schema
+
+ writer_schema = property(lambda self: self._writer_schema,
+ set_writer_schema)
+
+ def read(self, decoder):
+ return self.read_data(self.writer_schema, decoder)
+
+ def read_data(self, writer_schema, decoder):
+ # function dispatch for reading data based on type of writer's schema
+ if writer_schema.type == 'null':
+ result = decoder.read_null()
+ elif writer_schema.type == 'boolean':
+ result = decoder.read_boolean()
+ elif writer_schema.type == 'string':
+ result = decoder.read_utf8()
+ elif writer_schema.type == 'int':
+ result = decoder.read_int()
+ elif writer_schema.type == 'long':
+ result = decoder.read_long()
+ elif writer_schema.type == 'float':
+ result = decoder.read_float()
+ elif writer_schema.type == 'double':
+ result = decoder.read_double()
+ elif writer_schema.type == 'bytes':
+ result = decoder.read_bytes()
+ elif writer_schema.type == 'fixed':
+ result = self.read_fixed(writer_schema, decoder)
+ elif writer_schema.type == 'enum':
+ result = self.read_enum(writer_schema, decoder)
+ elif writer_schema.type == 'array':
+ result = self.read_array(writer_schema, decoder)
+ elif writer_schema.type == 'map':
+ result = self.read_map(writer_schema, decoder)
+ elif writer_schema.type in ['union', 'error_union']:
+ result = self.read_union(writer_schema, decoder)
+ elif writer_schema.type in ['record', 'error', 'request']:
+ result = self.read_record(writer_schema, decoder)
+ else:
+ fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type
+ raise schema.AvroException(fail_msg)
+ return result
+
+ def skip_data(self, writer_schema, decoder):
+ if writer_schema.type == 'null':
+ result = decoder.skip_null()
+ elif writer_schema.type == 'boolean':
+ result = decoder.skip_boolean()
+ elif writer_schema.type == 'string':
+ result = decoder.skip_utf8()
+ elif writer_schema.type == 'int':
+ result = decoder.skip_int()
+ elif writer_schema.type == 'long':
+ result = decoder.skip_long()
+ elif writer_schema.type == 'float':
+ result = decoder.skip_float()
+ elif writer_schema.type == 'double':
+ result = decoder.skip_double()
+ elif writer_schema.type == 'bytes':
+ result = decoder.skip_bytes()
+ elif writer_schema.type == 'fixed':
+ result = self.skip_fixed(writer_schema, decoder)
+ elif writer_schema.type == 'enum':
+ result = self.skip_enum(decoder)
+ elif writer_schema.type == 'array':
+ self.skip_array(writer_schema, decoder)
+ result = None
+ elif writer_schema.type == 'map':
+ self.skip_map(writer_schema, decoder)
+ result = None
+ elif writer_schema.type in ['union', 'error_union']:
+ result = self.skip_union(writer_schema, decoder)
+ elif writer_schema.type in ['record', 'error', 'request']:
+ self.skip_record(writer_schema, decoder)
+ result = None
+ else:
+ fail_msg = "Unknown schema type: %s" % writer_schema.type
+ raise schema.AvroException(fail_msg)
+ return result
+
+ @staticmethod
+ def read_fixed(writer_schema, decoder):
+ """
+ Fixed instances are encoded using the number of bytes declared
+ in the schema.
+ """
+ return decoder.read(writer_schema.size)
+
+ @staticmethod
+ def skip_fixed(writer_schema, decoder):
+ return decoder.skip(writer_schema.size)
+
+ @staticmethod
+ def read_enum(writer_schema, decoder):
+ """
+        An enum is encoded by an int, representing the zero-based position
+ of the symbol in the schema.
+ """
+ # read data
+ index_of_symbol = decoder.read_int()
+ if index_of_symbol >= len(writer_schema.symbols):
+ fail_msg = "Can't access enum index %d for enum with %d symbols" \
+ % (index_of_symbol, len(writer_schema.symbols))
+ raise SchemaResolutionException(fail_msg, writer_schema)
+ read_symbol = writer_schema.symbols[index_of_symbol]
+ return read_symbol
+
+ @staticmethod
+ def skip_enum(decoder):
+ return decoder.skip_int()
+
+ def read_array(self, writer_schema, decoder):
+ """
+ Arrays are encoded as a series of blocks.
+
+ Each block consists of a long count value,
+ followed by that many array items.
+ A block with count zero indicates the end of the array.
+ Each item is encoded per the array's item schema.
+
+ If a block's count is negative,
+ then the count is followed immediately by a long block size,
+ indicating the number of bytes in the block.
+ The actual count in this case
+ is the absolute value of the count written.
+ """
+ read_items = []
+ block_count = decoder.read_long()
+ while block_count != 0:
+ if block_count < 0:
+ block_count = -block_count
+ decoder.read_long()
+ for _ in range(block_count):
+ read_items.append(self.read_data(writer_schema.items, decoder))
+ block_count = decoder.read_long()
+ return read_items
+
+ def skip_array(self, writer_schema, decoder):
+ block_count = decoder.read_long()
+ while block_count != 0:
+ if block_count < 0:
+ block_size = decoder.read_long()
+ decoder.skip(block_size)
+ else:
+ for _ in range(block_count):
+ self.skip_data(writer_schema.items, decoder)
+ block_count = decoder.read_long()
+
+ def read_map(self, writer_schema, decoder):
+ """
+ Maps are encoded as a series of blocks.
+
+ Each block consists of a long count value,
+ followed by that many key/value pairs.
+ A block with count zero indicates the end of the map.
+ Each item is encoded per the map's value schema.
+
+ If a block's count is negative,
+ then the count is followed immediately by a long block size,
+ indicating the number of bytes in the block.
+ The actual count in this case
+ is the absolute value of the count written.
+ """
+ read_items = {}
+ block_count = decoder.read_long()
+ while block_count != 0:
+ if block_count < 0:
+ block_count = -block_count
+ decoder.read_long()
+ for _ in range(block_count):
+ key = decoder.read_utf8()
+ read_items[key] = self.read_data(writer_schema.values, decoder)
+ block_count = decoder.read_long()
+ return read_items
+
+ def skip_map(self, writer_schema, decoder):
+ block_count = decoder.read_long()
+ while block_count != 0:
+ if block_count < 0:
+ block_size = decoder.read_long()
+ decoder.skip(block_size)
+ else:
+ for _ in range(block_count):
+ decoder.skip_utf8()
+ self.skip_data(writer_schema.values, decoder)
+ block_count = decoder.read_long()
+
+ def read_union(self, writer_schema, decoder):
+ """
+ A union is encoded by first writing a long value indicating
+ the zero-based position within the union of the schema of its value.
+ The value is then encoded per the indicated schema within the union.
+ """
+ # schema resolution
+ index_of_schema = int(decoder.read_long())
+ if index_of_schema >= len(writer_schema.schemas):
+ fail_msg = "Can't access branch index %d for union with %d branches" \
+ % (index_of_schema, len(writer_schema.schemas))
+ raise SchemaResolutionException(fail_msg, writer_schema)
+ selected_writer_schema = writer_schema.schemas[index_of_schema]
+
+ # read data
+ return self.read_data(selected_writer_schema, decoder)
+
+ def skip_union(self, writer_schema, decoder):
+ index_of_schema = int(decoder.read_long())
+ if index_of_schema >= len(writer_schema.schemas):
+ fail_msg = "Can't access branch index %d for union with %d branches" \
+ % (index_of_schema, len(writer_schema.schemas))
+ raise SchemaResolutionException(fail_msg, writer_schema)
+ return self.skip_data(writer_schema.schemas[index_of_schema], decoder)
+
+ def read_record(self, writer_schema, decoder):
+ """
+ A record is encoded by encoding the values of its fields
+ in the order that they are declared. In other words, a record
+ is encoded as just the concatenation of the encodings of its fields.
+ Field values are encoded per their schema.
+
+ Schema Resolution:
+ * the ordering of fields may be different: fields are matched by name.
+ * schemas for fields with the same name in both records are resolved
+ recursively.
+ * if the writer's record contains a field with a name not present in the
+ reader's record, the writer's value for that field is ignored.
+ * if the reader's record schema has a field that contains a default value,
+ and writer's schema does not have a field with the same name, then the
+ reader should use the default value from its field.
+ * if the reader's record schema has a field with no default value, and
+ writer's schema does not have a field with the same name, then the
+ field's value is unset.
+ """
+ # schema resolution
+ read_record = {}
+ for field in writer_schema.fields:
+ field_val = self.read_data(field.type, decoder)
+ read_record[field.name] = field_val
+ return read_record
+
+ def skip_record(self, writer_schema, decoder):
+ for field in writer_schema.fields:
+ self.skip_data(field.type, decoder)
+
+
+# ------------------------------------------------------------------------------
+
+if __name__ == '__main__':
+ raise Exception('Not a standalone module')
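The variable-length zig-zag coding described in `read_long` above, exercised directly against `BinaryDecoder` with hand-encoded bytes (for example, 150 encodes as `0xAC 0x02`; the import path assumes the vendored layout in this diff):
```
import io

from azext_storage_preview.vendored_sdks.blob._shared.avro.avro_io import BinaryDecoder

def decode_long(raw):
    # Wrap the raw bytes in a file-like object and decode a single zig-zag varint.
    return BinaryDecoder(io.BytesIO(raw)).read_long()

print(decode_long(b"\x00"))       # -> 0
print(decode_long(b"\x04"))       # -> 2   (even encodings are non-negative)
print(decode_long(b"\x03"))       # -> -2  (odd encodings are negative)
print(decode_long(b"\xac\x02"))   # -> 150 (multi-byte varint, 7 data bits per byte)
```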
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/avro_io_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/avro_io_async.py
new file mode 100644
index 00000000000..e9812163795
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/avro_io_async.py
@@ -0,0 +1,448 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+"""Input/output utilities.
+
+Includes:
+ - i/o-specific constants
+ - i/o-specific exceptions
+ - schema validation
+ - leaf value encoding and decoding
+ - datum reader/writer stuff (?)
+
+Also includes a generic representation for data, which uses the
+following mapping:
+ - Schema records are implemented as dict.
+ - Schema arrays are implemented as list.
+ - Schema maps are implemented as dict.
+ - Schema strings are implemented as unicode.
+ - Schema bytes are implemented as str.
+ - Schema ints are implemented as int.
+ - Schema longs are implemented as long.
+ - Schema floats are implemented as float.
+ - Schema doubles are implemented as float.
+ - Schema booleans are implemented as bool.
+"""
+
+import logging
+import sys
+
+from ..avro import schema
+
+from .avro_io import STRUCT_FLOAT, STRUCT_DOUBLE, SchemaResolutionException
+
+PY3 = sys.version_info[0] == 3
+
+logger = logging.getLogger(__name__)
+
+# ------------------------------------------------------------------------------
+# Decoder
+
+
+class AsyncBinaryDecoder(object):
+ """Read leaf values."""
+
+ def __init__(self, reader):
+ """
+ reader is a Python object on which we can call read, seek, and tell.
+ """
+ self._reader = reader
+
+ @property
+ def reader(self):
+ """Reports the reader used by this decoder."""
+ return self._reader
+
+ async def read(self, n):
+ """Read n bytes.
+
+ Args:
+ n: Number of bytes to read.
+ Returns:
+ The next n bytes from the input.
+ """
+ assert (n >= 0), n
+ input_bytes = await self.reader.read(n)
+ if n > 0 and not input_bytes:
+ raise StopAsyncIteration
+ assert (len(input_bytes) == n), input_bytes
+ return input_bytes
+
+ @staticmethod
+ def read_null():
+ """
+ null is written as zero bytes
+ """
+ return None
+
+ async def read_boolean(self):
+ """
+ a boolean is written as a single byte
+ whose value is either 0 (false) or 1 (true).
+ """
+ b = ord(await self.read(1))
+ if b == 1:
+ return True
+ if b == 0:
+ return False
+ fail_msg = "Invalid value for boolean: %s" % b
+ raise schema.AvroException(fail_msg)
+
+ async def read_int(self):
+ """
+ int and long values are written using variable-length, zig-zag coding.
+ """
+ return await self.read_long()
+
+ async def read_long(self):
+ """
+ int and long values are written using variable-length, zig-zag coding.
+ """
+ b = ord(await self.read(1))
+ n = b & 0x7F
+ shift = 7
+ while (b & 0x80) != 0:
+ b = ord(await self.read(1))
+ n |= (b & 0x7F) << shift
+ shift += 7
+ datum = (n >> 1) ^ -(n & 1)
+ return datum
+
+ async def read_float(self):
+ """
+ A float is written as 4 bytes.
+ The float is converted into a 32-bit integer using a method equivalent to
+ Java's floatToIntBits and then encoded in little-endian format.
+ """
+ return STRUCT_FLOAT.unpack(await self.read(4))[0]
+
+ async def read_double(self):
+ """
+ A double is written as 8 bytes.
+ The double is converted into a 64-bit integer using a method equivalent to
+ Java's doubleToLongBits and then encoded in little-endian format.
+ """
+ return STRUCT_DOUBLE.unpack(await self.read(8))[0]
+
+ async def read_bytes(self):
+ """
+ Bytes are encoded as a long followed by that many bytes of data.
+ """
+ nbytes = await self.read_long()
+ assert (nbytes >= 0), nbytes
+ return await self.read(nbytes)
+
+ async def read_utf8(self):
+ """
+ A string is encoded as a long followed by
+ that many bytes of UTF-8 encoded character data.
+ """
+ input_bytes = await self.read_bytes()
+ if PY3:
+ try:
+ return input_bytes.decode('utf-8')
+ except UnicodeDecodeError as exn:
+ logger.error('Invalid UTF-8 input bytes: %r', input_bytes)
+ raise exn
+ else:
+ # PY2
+ return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable
+
+ def skip_null(self):
+ pass
+
+ async def skip_boolean(self):
+ await self.skip(1)
+
+ async def skip_int(self):
+ await self.skip_long()
+
+ async def skip_long(self):
+ b = ord(await self.read(1))
+ while (b & 0x80) != 0:
+ b = ord(await self.read(1))
+
+ async def skip_float(self):
+ await self.skip(4)
+
+ async def skip_double(self):
+ await self.skip(8)
+
+ async def skip_bytes(self):
+ await self.skip(await self.read_long())
+
+ async def skip_utf8(self):
+ await self.skip_bytes()
+
+ async def skip(self, n):
+ await self.reader.seek(await self.reader.tell() + n)
+
+
+# ------------------------------------------------------------------------------
+# DatumReader
+
+
+class AsyncDatumReader(object):
+ """Deserialize Avro-encoded data into a Python data structure."""
+
+ def __init__(self, writer_schema=None):
+ """
+ As defined in the Avro specification, we call the schema encoded
+ in the data the "writer's schema", and the schema expected by the
+ reader the "reader's schema".
+ """
+ self._writer_schema = writer_schema
+
+ # read/write properties
+ def set_writer_schema(self, writer_schema):
+ self._writer_schema = writer_schema
+
+ writer_schema = property(lambda self: self._writer_schema,
+ set_writer_schema)
+
+ async def read(self, decoder):
+ return await self.read_data(self.writer_schema, decoder)
+
+ async def read_data(self, writer_schema, decoder):
+ # function dispatch for reading data based on type of writer's schema
+ if writer_schema.type == 'null':
+ result = decoder.read_null()
+ elif writer_schema.type == 'boolean':
+ result = await decoder.read_boolean()
+ elif writer_schema.type == 'string':
+ result = await decoder.read_utf8()
+ elif writer_schema.type == 'int':
+ result = await decoder.read_int()
+ elif writer_schema.type == 'long':
+ result = await decoder.read_long()
+ elif writer_schema.type == 'float':
+ result = await decoder.read_float()
+ elif writer_schema.type == 'double':
+ result = await decoder.read_double()
+ elif writer_schema.type == 'bytes':
+ result = await decoder.read_bytes()
+ elif writer_schema.type == 'fixed':
+ result = await self.read_fixed(writer_schema, decoder)
+ elif writer_schema.type == 'enum':
+ result = await self.read_enum(writer_schema, decoder)
+ elif writer_schema.type == 'array':
+ result = await self.read_array(writer_schema, decoder)
+ elif writer_schema.type == 'map':
+ result = await self.read_map(writer_schema, decoder)
+ elif writer_schema.type in ['union', 'error_union']:
+ result = await self.read_union(writer_schema, decoder)
+ elif writer_schema.type in ['record', 'error', 'request']:
+ result = await self.read_record(writer_schema, decoder)
+ else:
+ fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type
+ raise schema.AvroException(fail_msg)
+ return result
+
+ async def skip_data(self, writer_schema, decoder):
+ if writer_schema.type == 'null':
+ result = decoder.skip_null()
+ elif writer_schema.type == 'boolean':
+ result = await decoder.skip_boolean()
+ elif writer_schema.type == 'string':
+ result = await decoder.skip_utf8()
+ elif writer_schema.type == 'int':
+ result = await decoder.skip_int()
+ elif writer_schema.type == 'long':
+ result = await decoder.skip_long()
+ elif writer_schema.type == 'float':
+ result = await decoder.skip_float()
+ elif writer_schema.type == 'double':
+ result = await decoder.skip_double()
+ elif writer_schema.type == 'bytes':
+ result = await decoder.skip_bytes()
+ elif writer_schema.type == 'fixed':
+ result = await self.skip_fixed(writer_schema, decoder)
+ elif writer_schema.type == 'enum':
+ result = await self.skip_enum(decoder)
+ elif writer_schema.type == 'array':
+ await self.skip_array(writer_schema, decoder)
+ result = None
+ elif writer_schema.type == 'map':
+ await self.skip_map(writer_schema, decoder)
+ result = None
+ elif writer_schema.type in ['union', 'error_union']:
+ result = await self.skip_union(writer_schema, decoder)
+ elif writer_schema.type in ['record', 'error', 'request']:
+ await self.skip_record(writer_schema, decoder)
+ result = None
+ else:
+ fail_msg = "Unknown schema type: %s" % writer_schema.type
+ raise schema.AvroException(fail_msg)
+ return result
+
+ @staticmethod
+ async def read_fixed(writer_schema, decoder):
+ """
+ Fixed instances are encoded using the number of bytes declared
+ in the schema.
+ """
+ return await decoder.read(writer_schema.size)
+
+ @staticmethod
+ async def skip_fixed(writer_schema, decoder):
+ return await decoder.skip(writer_schema.size)
+
+ @staticmethod
+ async def read_enum(writer_schema, decoder):
+ """
+        An enum is encoded by an int, representing the zero-based position
+ of the symbol in the schema.
+ """
+ # read data
+ index_of_symbol = await decoder.read_int()
+ if index_of_symbol >= len(writer_schema.symbols):
+ fail_msg = "Can't access enum index %d for enum with %d symbols" \
+ % (index_of_symbol, len(writer_schema.symbols))
+ raise SchemaResolutionException(fail_msg, writer_schema)
+ read_symbol = writer_schema.symbols[index_of_symbol]
+ return read_symbol
+
+ @staticmethod
+ async def skip_enum(decoder):
+ return await decoder.skip_int()
+
+ async def read_array(self, writer_schema, decoder):
+ """
+ Arrays are encoded as a series of blocks.
+
+ Each block consists of a long count value,
+ followed by that many array items.
+ A block with count zero indicates the end of the array.
+ Each item is encoded per the array's item schema.
+
+ If a block's count is negative,
+ then the count is followed immediately by a long block size,
+ indicating the number of bytes in the block.
+ The actual count in this case
+ is the absolute value of the count written.
+ """
+ read_items = []
+ block_count = await decoder.read_long()
+ while block_count != 0:
+ if block_count < 0:
+ block_count = -block_count
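+                # A negative count is followed by the block's size in bytes; read and discard it here.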
+ await decoder.read_long()
+ for _ in range(block_count):
+ read_items.append(await self.read_data(writer_schema.items, decoder))
+ block_count = await decoder.read_long()
+ return read_items
+
+ async def skip_array(self, writer_schema, decoder):
+ block_count = await decoder.read_long()
+ while block_count != 0:
+ if block_count < 0:
+ block_size = await decoder.read_long()
+ await decoder.skip(block_size)
+ else:
+ for _ in range(block_count):
+ await self.skip_data(writer_schema.items, decoder)
+ block_count = await decoder.read_long()
+
+ async def read_map(self, writer_schema, decoder):
+ """
+ Maps are encoded as a series of blocks.
+
+ Each block consists of a long count value,
+ followed by that many key/value pairs.
+ A block with count zero indicates the end of the map.
+ Each item is encoded per the map's value schema.
+
+ If a block's count is negative,
+ then the count is followed immediately by a long block size,
+ indicating the number of bytes in the block.
+ The actual count in this case
+ is the absolute value of the count written.
+ """
+ read_items = {}
+ block_count = await decoder.read_long()
+ while block_count != 0:
+ if block_count < 0:
+ block_count = -block_count
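+                # A negative count is followed by the block's size in bytes; read and discard it here.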
+ await decoder.read_long()
+ for _ in range(block_count):
+ key = await decoder.read_utf8()
+ read_items[key] = await self.read_data(writer_schema.values, decoder)
+ block_count = await decoder.read_long()
+ return read_items
+
+ async def skip_map(self, writer_schema, decoder):
+ block_count = await decoder.read_long()
+ while block_count != 0:
+ if block_count < 0:
+ block_size = await decoder.read_long()
+ await decoder.skip(block_size)
+ else:
+ for _ in range(block_count):
+ await decoder.skip_utf8()
+ await self.skip_data(writer_schema.values, decoder)
+ block_count = await decoder.read_long()
+
+ async def read_union(self, writer_schema, decoder):
+ """
+ A union is encoded by first writing a long value indicating
+ the zero-based position within the union of the schema of its value.
+ The value is then encoded per the indicated schema within the union.
+ """
+ # schema resolution
+ index_of_schema = int(await decoder.read_long())
+ if index_of_schema >= len(writer_schema.schemas):
+ fail_msg = "Can't access branch index %d for union with %d branches" \
+ % (index_of_schema, len(writer_schema.schemas))
+ raise SchemaResolutionException(fail_msg, writer_schema)
+ selected_writer_schema = writer_schema.schemas[index_of_schema]
+
+ # read data
+ return await self.read_data(selected_writer_schema, decoder)
+
+ async def skip_union(self, writer_schema, decoder):
+ index_of_schema = int(await decoder.read_long())
+ if index_of_schema >= len(writer_schema.schemas):
+ fail_msg = "Can't access branch index %d for union with %d branches" \
+ % (index_of_schema, len(writer_schema.schemas))
+ raise SchemaResolutionException(fail_msg, writer_schema)
+ return await self.skip_data(writer_schema.schemas[index_of_schema], decoder)
+
+ async def read_record(self, writer_schema, decoder):
+ """
+ A record is encoded by encoding the values of its fields
+ in the order that they are declared. In other words, a record
+ is encoded as just the concatenation of the encodings of its fields.
+ Field values are encoded per their schema.
+
+ Schema Resolution:
+ * the ordering of fields may be different: fields are matched by name.
+ * schemas for fields with the same name in both records are resolved
+ recursively.
+ * if the writer's record contains a field with a name not present in the
+ reader's record, the writer's value for that field is ignored.
+ * if the reader's record schema has a field that contains a default value,
+ and writer's schema does not have a field with the same name, then the
+ reader should use the default value from its field.
+ * if the reader's record schema has a field with no default value, and
+ writer's schema does not have a field with the same name, then the
+ field's value is unset.
+ """
+ # schema resolution
+ read_record = {}
+ for field in writer_schema.fields:
+ field_val = await self.read_data(field.type, decoder)
+ read_record[field.name] = field_val
+ return read_record
+
+ async def skip_record(self, writer_schema, decoder):
+ for field in writer_schema.fields:
+ await self.skip_data(field.type, decoder)
+
+
+# ------------------------------------------------------------------------------
+
+if __name__ == '__main__':
+ raise Exception('Not a standalone module')
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/datafile.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/datafile.py
new file mode 100644
index 00000000000..df06fe0cfe7
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/datafile.py
@@ -0,0 +1,266 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+"""Read/Write Avro File Object Containers."""
+
+import io
+import logging
+import sys
+import zlib
+
+from ..avro import avro_io
+from ..avro import schema
+
+PY3 = sys.version_info[0] == 3
+
+logger = logging.getLogger(__name__)
+
+# ------------------------------------------------------------------------------
+# Constants
+
+# Version of the container file:
+VERSION = 1
+
+if PY3:
+ MAGIC = b'Obj' + bytes([VERSION])
+ MAGIC_SIZE = len(MAGIC)
+else:
+ MAGIC = 'Obj' + chr(VERSION)
+ MAGIC_SIZE = len(MAGIC)
+
+# Size of the synchronization marker, in number of bytes:
+SYNC_SIZE = 16
+
+# Schema of the container header:
+META_SCHEMA = schema.parse("""
+{
+ "type": "record", "name": "org.apache.avro.file.Header",
+ "fields": [{
+ "name": "magic",
+ "type": {"type": "fixed", "name": "magic", "size": %(magic_size)d}
+ }, {
+ "name": "meta",
+ "type": {"type": "map", "values": "bytes"}
+ }, {
+ "name": "sync",
+ "type": {"type": "fixed", "name": "sync", "size": %(sync_size)d}
+ }]
+}
+""" % {
+ 'magic_size': MAGIC_SIZE,
+ 'sync_size': SYNC_SIZE,
+})
+
+# Codecs supported by container files:
+VALID_CODECS = frozenset(['null', 'deflate'])
+
+# Metadata key associated to the schema:
+SCHEMA_KEY = "avro.schema"
+
+
+# ------------------------------------------------------------------------------
+# Exceptions
+
+
+class DataFileException(schema.AvroException):
+ """Problem reading or writing file object containers."""
+
+# ------------------------------------------------------------------------------
+
+
+class DataFileReader(object): # pylint: disable=too-many-instance-attributes
+ """Read files written by DataFileWriter."""
+
+ def __init__(self, reader, datum_reader, **kwargs):
+ """Initializes a new data file reader.
+
+ Args:
+ reader: Open file to read from.
+ datum_reader: Avro datum reader.
+ """
+ self._reader = reader
+ self._raw_decoder = avro_io.BinaryDecoder(reader)
+ self._header_reader = kwargs.pop('header_reader', None)
+ self._header_decoder = None if self._header_reader is None else avro_io.BinaryDecoder(self._header_reader)
+ self._datum_decoder = None # Maybe reset at every block.
+ self._datum_reader = datum_reader
+
+        # In case self._reader only has partial content (without the header),
+        # seek(0, 0) to make sure the (partial) content is read from the beginning.
+ self._reader.seek(0, 0)
+
+ # read the header: magic, meta, sync
+ self._read_header()
+
+ # ensure codec is valid
+ avro_codec_raw = self.get_meta('avro.codec')
+ if avro_codec_raw is None:
+ self.codec = "null"
+ else:
+ self.codec = avro_codec_raw.decode('utf-8')
+ if self.codec not in VALID_CODECS:
+ raise DataFileException('Unknown codec: %s.' % self.codec)
+
+ # get ready to read
+ self._block_count = 0
+
+        # object_position supports resuming a future read from the current position,
+        # so the Avro file need not be downloaded from the beginning again.
+ if hasattr(self._reader, 'object_position'):
+ self.reader.track_object_position()
+
+ self._cur_object_index = 0
+        # header_reader indicates the reader only has partial content. The reader has no block header,
+        # so we use the block count stored from the previous read.
+        # Also, ChangeFeed only uses codec==null, so using _raw_decoder is fine.
+ if self._header_reader is not None:
+ self._datum_decoder = self._raw_decoder
+
+ self.datum_reader.writer_schema = (
+ schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8')))
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, data_type, value, traceback):
+ # Perform a close if there's no exception
+ if data_type is None:
+ self.close()
+
+ def __iter__(self):
+ return self
+
+ # read-only properties
+ @property
+ def reader(self):
+ return self._reader
+
+ @property
+ def raw_decoder(self):
+ return self._raw_decoder
+
+ @property
+ def datum_decoder(self):
+ return self._datum_decoder
+
+ @property
+ def datum_reader(self):
+ return self._datum_reader
+
+ @property
+ def sync_marker(self):
+ return self._sync_marker
+
+ @property
+ def meta(self):
+ return self._meta
+
+ # read/write properties
+ @property
+ def block_count(self):
+ return self._block_count
+
+ def get_meta(self, key):
+ """Reports the value of a given metadata key.
+
+ Args:
+ key: Metadata key (string) to report the value of.
+ Returns:
+ Value associated to the metadata key, as bytes.
+ """
+ return self._meta.get(key)
+
+ def _read_header(self):
+ header_reader = self._header_reader if self._header_reader else self._reader
+ header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder
+
+ # seek to the beginning of the file to get magic block
+ header_reader.seek(0, 0)
+
+ # read header into a dict
+ header = self.datum_reader.read_data(META_SCHEMA, header_decoder)
+
+ # check magic number
+ if header.get('magic') != MAGIC:
+ fail_msg = "Not an Avro data file: %s doesn't match %s." \
+ % (header.get('magic'), MAGIC)
+ raise schema.AvroException(fail_msg)
+
+ # set metadata
+ self._meta = header['meta']
+
+ # set sync marker
+ self._sync_marker = header['sync']
+
+ def _read_block_header(self):
+ self._block_count = self.raw_decoder.read_long()
+ if self.codec == "null":
+ # Skip a long; we don't need to use the length.
+ self.raw_decoder.skip_long()
+ self._datum_decoder = self._raw_decoder
+ elif self.codec == 'deflate':
+ # Compressed data is stored as (length, data), which
+ # corresponds to how the "bytes" type is encoded.
+ data = self.raw_decoder.read_bytes()
+ # -15 is the log of the window size; negative indicates
+ # "raw" (no zlib headers) decompression. See zlib.h.
+ uncompressed = zlib.decompress(data, -15)
+ self._datum_decoder = avro_io.BinaryDecoder(io.BytesIO(uncompressed))
+ else:
+ raise DataFileException("Unknown codec: %r" % self.codec)
+
+ def _skip_sync(self):
+ """
+        Read the next SYNC_SIZE bytes; if they match the sync marker, leave them consumed.
+        Otherwise, seek back to where we started. Raises StopIteration when no data remains.
+ """
+ proposed_sync_marker = self.reader.read(SYNC_SIZE)
+ if SYNC_SIZE > 0 and not proposed_sync_marker:
+ raise StopIteration
+ if proposed_sync_marker != self.sync_marker:
+ self.reader.seek(-SYNC_SIZE, 1)
+
+ def __next__(self):
+ """Return the next datum in the file."""
+ if self.block_count == 0:
+ self._skip_sync()
+
+            # object_position supports resuming a future read from the current position,
+            # so the Avro file need not be downloaded from the beginning when this attribute exists.
+ if hasattr(self._reader, 'object_position'):
+ self.reader.track_object_position()
+ self._cur_object_index = 0
+
+ self._read_block_header()
+
+ datum = self.datum_reader.read(self.datum_decoder)
+ self._block_count -= 1
+ self._cur_object_index += 1
+
+        # object_position supports resuming a future read from the current position.
+        # Track the index of the next item to be read, as well as the offset
+        # before the next sync marker once a block has been fully consumed.
+ if hasattr(self._reader, 'object_position'):
+ if self.block_count == 0:
+ # the next event to be read is at index 0 in the new chunk of blocks,
+ self.reader.track_object_position()
+ self.reader.set_object_index(0)
+ else:
+ self.reader.set_object_index(self._cur_object_index)
+
+ return datum
+
+ # PY2
+ def next(self):
+ return self.__next__()
+
+ def close(self):
+ """Close this reader."""
+ self.reader.close()
+
+
+if __name__ == '__main__':
+ raise Exception('Not a standalone module')
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/datafile_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/datafile_async.py
new file mode 100644
index 00000000000..1e9d018228d
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/datafile_async.py
@@ -0,0 +1,215 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+"""Read/Write Avro File Object Containers."""
+
+import logging
+import sys
+
+from ..avro import avro_io_async
+from ..avro import schema
+from .datafile import DataFileException
+from .datafile import MAGIC, SYNC_SIZE, META_SCHEMA, SCHEMA_KEY
+
+
+PY3 = sys.version_info[0] == 3
+
+logger = logging.getLogger(__name__)
+
+# ------------------------------------------------------------------------------
+# Constants
+
+# Codecs supported by container files:
+VALID_CODECS = frozenset(['null'])
+
+
+class AsyncDataFileReader(object): # pylint: disable=too-many-instance-attributes
+ """Read files written by DataFileWriter."""
+
+ def __init__(self, reader, datum_reader, **kwargs):
+ """Initializes a new data file reader.
+
+ Args:
+ reader: Open file to read from.
+ datum_reader: Avro datum reader.
+ """
+ self._reader = reader
+ self._raw_decoder = avro_io_async.AsyncBinaryDecoder(reader)
+ self._header_reader = kwargs.pop('header_reader', None)
+ self._header_decoder = None if self._header_reader is None else \
+ avro_io_async.AsyncBinaryDecoder(self._header_reader)
+ self._datum_decoder = None # Maybe reset at every block.
+ self._datum_reader = datum_reader
+ self.codec = "null"
+ self._block_count = 0
+ self._cur_object_index = 0
+ self._meta = None
+ self._sync_marker = None
+
+ async def init(self):
+        # In case self._reader only has partial content (without the header),
+        # seek(0, 0) to make sure the (partial) content is read from the beginning.
+ await self._reader.seek(0, 0)
+
+ # read the header: magic, meta, sync
+ await self._read_header()
+
+ # ensure codec is valid
+ avro_codec_raw = self.get_meta('avro.codec')
+ if avro_codec_raw is None:
+ self.codec = "null"
+ else:
+ self.codec = avro_codec_raw.decode('utf-8')
+ if self.codec not in VALID_CODECS:
+ raise DataFileException('Unknown codec: %s.' % self.codec)
+
+ # get ready to read
+ self._block_count = 0
+
+        # object_position supports resuming a future read from the current position,
+        # so the Avro file need not be downloaded from the beginning again.
+ if hasattr(self._reader, 'object_position'):
+ self.reader.track_object_position()
+
+        # header_reader indicates the reader only has partial content. The reader has no block header,
+        # so we use the block count stored from the previous read.
+        # Also, ChangeFeed only uses codec==null, so using _raw_decoder is fine.
+ if self._header_reader is not None:
+ self._datum_decoder = self._raw_decoder
+ self.datum_reader.writer_schema = (
+ schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8')))
+ return self
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, data_type, value, traceback):
+ # Perform a close if there's no exception
+ if data_type is None:
+ self.close()
+
+ def __aiter__(self):
+ return self
+
+ # read-only properties
+ @property
+ def reader(self):
+ return self._reader
+
+ @property
+ def raw_decoder(self):
+ return self._raw_decoder
+
+ @property
+ def datum_decoder(self):
+ return self._datum_decoder
+
+ @property
+ def datum_reader(self):
+ return self._datum_reader
+
+ @property
+ def sync_marker(self):
+ return self._sync_marker
+
+ @property
+ def meta(self):
+ return self._meta
+
+ # read/write properties
+ @property
+ def block_count(self):
+ return self._block_count
+
+ def get_meta(self, key):
+ """Reports the value of a given metadata key.
+
+ Args:
+ key: Metadata key (string) to report the value of.
+ Returns:
+ Value associated to the metadata key, as bytes.
+ """
+ return self._meta.get(key)
+
+ async def _read_header(self):
+ header_reader = self._header_reader if self._header_reader else self._reader
+ header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder
+
+ # seek to the beginning of the file to get magic block
+ await header_reader.seek(0, 0)
+
+ # read header into a dict
+ header = await self.datum_reader.read_data(META_SCHEMA, header_decoder)
+
+ # check magic number
+ if header.get('magic') != MAGIC:
+ fail_msg = "Not an Avro data file: %s doesn't match %s." \
+ % (header.get('magic'), MAGIC)
+ raise schema.AvroException(fail_msg)
+
+ # set metadata
+ self._meta = header['meta']
+
+ # set sync marker
+ self._sync_marker = header['sync']
+
+ async def _read_block_header(self):
+ self._block_count = await self.raw_decoder.read_long()
+ if self.codec == "null":
+ # Skip a long; we don't need to use the length.
+ await self.raw_decoder.skip_long()
+ self._datum_decoder = self._raw_decoder
+ else:
+ raise DataFileException("Unknown codec: %r" % self.codec)
+
+ async def _skip_sync(self):
+ """
+        Read the next SYNC_SIZE bytes; if they match the sync marker, leave them consumed.
+        Otherwise, seek back to where we started. Raises StopAsyncIteration when no data remains.
+ """
+ proposed_sync_marker = await self.reader.read(SYNC_SIZE)
+ if SYNC_SIZE > 0 and not proposed_sync_marker:
+ raise StopAsyncIteration
+ if proposed_sync_marker != self.sync_marker:
+ await self.reader.seek(-SYNC_SIZE, 1)
+
+ async def __anext__(self):
+ """Return the next datum in the file."""
+ if self.block_count == 0:
+ await self._skip_sync()
+
+            # object_position supports resuming a future read from the current position,
+            # so the Avro file need not be downloaded from the beginning when this attribute exists.
+ if hasattr(self._reader, 'object_position'):
+ await self.reader.track_object_position()
+ self._cur_object_index = 0
+
+ await self._read_block_header()
+
+ datum = await self.datum_reader.read(self.datum_decoder)
+ self._block_count -= 1
+ self._cur_object_index += 1
+
+        # object_position supports resuming a future read from the current position.
+        # Track the index of the next item to be read, as well as the offset
+        # before the next sync marker once a block has been fully consumed.
+ if hasattr(self._reader, 'object_position'):
+ if self.block_count == 0:
+ # the next event to be read is at index 0 in the new chunk of blocks,
+ await self.reader.track_object_position()
+ await self.reader.set_object_index(0)
+ else:
+ await self.reader.set_object_index(self._cur_object_index)
+
+ return datum
+
+ def close(self):
+ """Close this reader."""
+ self.reader.close()
+
+
+if __name__ == '__main__':
+ raise Exception('Not a standalone module')
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/schema.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/schema.py
new file mode 100644
index 00000000000..ffe28530167
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/avro/schema.py
@@ -0,0 +1,1221 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines
+
+"""Representation of Avro schemas.
+
+A schema may be one of:
+ - A record, mapping field names to field value data;
+ - An error, equivalent to a record;
+ - An enum, containing one of a small set of symbols;
+ - An array of values, all of the same schema;
+ - A map containing string/value pairs, each of a declared schema;
+ - A union of other schemas;
+ - A fixed sized binary object;
+ - A unicode string;
+ - A sequence of bytes;
+ - A 32-bit signed int;
+ - A 64-bit signed long;
+ - A 32-bit floating-point float;
+ - A 64-bit floating-point double;
+ - A boolean;
+ - Null.
+"""
+
+import abc
+import json
+import logging
+import re
+import sys
+from six import with_metaclass
+
+PY2 = sys.version_info[0] == 2
+
+if PY2:
+ _str = unicode # pylint: disable=undefined-variable
+else:
+ _str = str
+
+logger = logging.getLogger(__name__)
+
+# ------------------------------------------------------------------------------
+# Constants
+
+# Log level more verbose than DEBUG=10, INFO=20, etc.
+DEBUG_VERBOSE = 5
+
+NULL = 'null'
+BOOLEAN = 'boolean'
+STRING = 'string'
+BYTES = 'bytes'
+INT = 'int'
+LONG = 'long'
+FLOAT = 'float'
+DOUBLE = 'double'
+FIXED = 'fixed'
+ENUM = 'enum'
+RECORD = 'record'
+ERROR = 'error'
+ARRAY = 'array'
+MAP = 'map'
+UNION = 'union'
+
+# Request and error unions are part of Avro protocols:
+REQUEST = 'request'
+ERROR_UNION = 'error_union'
+
+PRIMITIVE_TYPES = frozenset([
+ NULL,
+ BOOLEAN,
+ STRING,
+ BYTES,
+ INT,
+ LONG,
+ FLOAT,
+ DOUBLE,
+])
+
+NAMED_TYPES = frozenset([
+ FIXED,
+ ENUM,
+ RECORD,
+ ERROR,
+])
+
+VALID_TYPES = frozenset.union(
+ PRIMITIVE_TYPES,
+ NAMED_TYPES,
+ [
+ ARRAY,
+ MAP,
+ UNION,
+ REQUEST,
+ ERROR_UNION,
+ ],
+)
+
+SCHEMA_RESERVED_PROPS = frozenset([
+ 'type',
+ 'name',
+ 'namespace',
+ 'fields', # Record
+ 'items', # Array
+ 'size', # Fixed
+ 'symbols', # Enum
+ 'values', # Map
+ 'doc',
+])
+
+FIELD_RESERVED_PROPS = frozenset([
+ 'default',
+ 'name',
+ 'doc',
+ 'order',
+ 'type',
+])
+
+VALID_FIELD_SORT_ORDERS = frozenset([
+ 'ascending',
+ 'descending',
+ 'ignore',
+])
+
+
+# ------------------------------------------------------------------------------
+# Exceptions
+
+
+class Error(Exception):
+ """Base class for errors in this module."""
+
+
+class AvroException(Error):
+ """Generic Avro schema error."""
+
+
+class SchemaParseException(AvroException):
+ """Error while parsing a JSON schema descriptor."""
+
+
+class Schema(with_metaclass(abc.ABCMeta, object)):
+ """Abstract base class for all Schema classes."""
+
+ def __init__(self, data_type, other_props=None):
+ """Initializes a new schema object.
+
+ Args:
+ data_type: Type of the schema to initialize.
+ other_props: Optional dictionary of additional properties.
+ """
+ if data_type not in VALID_TYPES:
+ raise SchemaParseException('%r is not a valid Avro type.' % data_type)
+
+ # All properties of this schema, as a map: property name -> property value
+ self._props = {}
+
+ self._props['type'] = data_type
+ self._type = data_type
+
+ if other_props:
+ self._props.update(other_props)
+
+ @property
+ def namespace(self):
+ """Returns: the namespace this schema belongs to, if any, or None."""
+ return self._props.get('namespace', None)
+
+ @property
+ def type(self):
+ """Returns: the type of this schema."""
+ return self._type
+
+ @property
+ def doc(self):
+ """Returns: the documentation associated to this schema, if any, or None."""
+ return self._props.get('doc', None)
+
+ @property
+ def props(self):
+ """Reports all the properties of this schema.
+
+ Includes all properties, reserved and non reserved.
+ JSON properties of this schema are directly generated from this dict.
+
+ Returns:
+ A dictionary of properties associated to this schema.
+ """
+ return self._props
+
+ @property
+ def other_props(self):
+ """Returns: the dictionary of non-reserved properties."""
+ return dict(filter_keys_out(items=self._props, keys=SCHEMA_RESERVED_PROPS))
+
+ def __str__(self):
+ """Returns: the JSON representation of this schema."""
+ return json.dumps(self.to_json(names=None))
+
+ @abc.abstractmethod
+ def to_json(self, names):
+ """Converts the schema object into its AVRO specification representation.
+
+        Schema types that have names (records, enums, and fixed) must take
+        care not to re-define schemas that are already listed
+        in the names parameter.
+ """
+ raise Exception('Cannot run abstract method.')
+
+
+# ------------------------------------------------------------------------------
+
+
+_RE_NAME = re.compile(r'[A-Za-z_][A-Za-z0-9_]*')
+
+_RE_FULL_NAME = re.compile(
+ r'^'
+ r'[.]?(?:[A-Za-z_][A-Za-z0-9_]*[.])*' # optional namespace
+ r'([A-Za-z_][A-Za-z0-9_]*)' # name
+ r'$'
+)
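+# For example, 'org.apache.avro.Record' matches _RE_FULL_NAME with group(1) == 'Record',
+# and a bare name such as 'Record' matches as well (empty namespace).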
+
+
+class Name(object):
+ """Representation of an Avro name."""
+
+ def __init__(self, name, namespace=None):
+ """Parses an Avro name.
+
+ Args:
+ name: Avro name to parse (relative or absolute).
+ namespace: Optional explicit namespace if the name is relative.
+ """
+ # Normalize: namespace is always defined as a string, possibly empty.
+ if namespace is None:
+ namespace = ''
+
+ if '.' in name:
+ # name is absolute, namespace is ignored:
+ self._fullname = name
+
+ match = _RE_FULL_NAME.match(self._fullname)
+ if match is None:
+ raise SchemaParseException(
+ 'Invalid absolute schema name: %r.' % self._fullname)
+
+ self._name = match.group(1)
+ self._namespace = self._fullname[:-(len(self._name) + 1)]
+
+ else:
+ # name is relative, combine with explicit namespace:
+ self._name = name
+ self._namespace = namespace
+ self._fullname = (self._name
+ if (not self._namespace) else
+ '%s.%s' % (self._namespace, self._name))
+
+ # Validate the fullname:
+ if _RE_FULL_NAME.match(self._fullname) is None:
+ raise SchemaParseException(
+                    'Invalid schema name %r inferred from name %r and namespace %r.'
+ % (self._fullname, self._name, self._namespace))
+
+ def __eq__(self, other):
+ if not isinstance(other, Name):
+ return NotImplemented
+ return self.fullname == other.fullname
+
+ @property
+ def simple_name(self):
+ """Returns: the simple name part of this name."""
+ return self._name
+
+ @property
+ def namespace(self):
+ """Returns: this name's namespace, possible the empty string."""
+ return self._namespace
+
+ @property
+ def fullname(self):
+ """Returns: the full name."""
+ return self._fullname
+
+
+# ------------------------------------------------------------------------------
+
+
+class Names(object):
+ """Tracks Avro named schemas and default namespace during parsing."""
+
+ def __init__(self, default_namespace=None, names=None):
+ """Initializes a new name tracker.
+
+ Args:
+ default_namespace: Optional default namespace.
+ names: Optional initial mapping of known named schemas.
+ """
+ if names is None:
+ names = {}
+ self._names = names
+ self._default_namespace = default_namespace
+
+ @property
+ def names(self):
+ """Returns: the mapping of known named schemas."""
+ return self._names
+
+ @property
+ def default_namespace(self):
+ """Returns: the default namespace, if any, or None."""
+ return self._default_namespace
+
+ def new_with_default_namespace(self, namespace):
+ """Creates a new name tracker from this tracker, but with a new default ns.
+
+ Args:
+ namespace: New default namespace to use.
+ Returns:
+ New name tracker with the specified default namespace.
+ """
+ return Names(names=self._names, default_namespace=namespace)
+
+ def get_name(self, name, namespace=None):
+ """Resolves the Avro name according to this name tracker's state.
+
+ Args:
+ name: Name to resolve (absolute or relative).
+ namespace: Optional explicit namespace.
+ Returns:
+ The specified name, resolved according to this tracker.
+ """
+ if namespace is None:
+ namespace = self._default_namespace
+ return Name(name=name, namespace=namespace)
+
+ def get_schema(self, name, namespace=None):
+ """Resolves an Avro schema by name.
+
+ Args:
+ name: Name (relative or absolute) of the Avro schema to look up.
+ namespace: Optional explicit namespace.
+ Returns:
+ The schema with the specified name, if any, or None.
+ """
+ avro_name = self.get_name(name=name, namespace=namespace)
+ return self._names.get(avro_name.fullname, None)
+
+ def prune_namespace(self, properties):
+ """given a properties, return properties with namespace removed if
+ it matches the own default namespace
+ """
+ if self.default_namespace is None:
+ # I have no default -- no change
+ return properties
+ if 'namespace' not in properties:
+ # he has no namespace - no change
+ return properties
+ if properties['namespace'] != self.default_namespace:
+ # we're different - leave his stuff alone
+ return properties
+ # we each have a namespace and it's redundant. delete his.
+ prunable = properties.copy()
+ del prunable['namespace']
+ return prunable
+
+ def register(self, schema):
+ """Registers a new named schema in this tracker.
+
+ Args:
+ schema: Named Avro schema to register in this tracker.
+ """
+ if schema.fullname in VALID_TYPES:
+ raise SchemaParseException(
+ '%s is a reserved type name.' % schema.fullname)
+ if schema.fullname in self.names:
+ raise SchemaParseException(
+ 'Avro name %r already exists.' % schema.fullname)
+
+ logger.log(DEBUG_VERBOSE, 'Register new name for %r', schema.fullname)
+ self._names[schema.fullname] = schema
+
+
+# ------------------------------------------------------------------------------
+
+
+class NamedSchema(Schema):
+ """Abstract base class for named schemas.
+
+ Named schemas are enumerated in NAMED_TYPES.
+ """
+
+ def __init__(
+ self,
+ data_type,
+ name=None,
+ namespace=None,
+ names=None,
+ other_props=None,
+ ):
+ """Initializes a new named schema object.
+
+ Args:
+ data_type: Type of the named schema.
+ name: Name (absolute or relative) of the schema.
+ namespace: Optional explicit namespace if name is relative.
+ names: Tracker to resolve and register Avro names.
+ other_props: Optional map of additional properties of the schema.
+ """
+ assert (data_type in NAMED_TYPES), ('Invalid named type: %r' % data_type)
+ self._avro_name = names.get_name(name=name, namespace=namespace)
+
+ super(NamedSchema, self).__init__(data_type, other_props)
+
+ names.register(self)
+
+ self._props['name'] = self.name
+ if self.namespace:
+ self._props['namespace'] = self.namespace
+
+ @property
+ def avro_name(self):
+ """Returns: the Name object describing this schema's name."""
+ return self._avro_name
+
+ @property
+ def name(self):
+ return self._avro_name.simple_name
+
+ @property
+ def namespace(self):
+ return self._avro_name.namespace
+
+ @property
+ def fullname(self):
+ return self._avro_name.fullname
+
+ def name_ref(self, names):
+ """Reports this schema name relative to the specified name tracker.
+
+ Args:
+ names: Avro name tracker to relativise this schema name against.
+ Returns:
+ This schema name, relativised against the specified name tracker.
+ """
+ if self.namespace == names.default_namespace:
+ return self.name
+ return self.fullname
+
+ @abc.abstractmethod
+ def to_json(self, names):
+ """Converts the schema object into its AVRO specification representation.
+
+        Schema types that have names (records, enums, and fixed) must take
+        care not to re-define schemas that are already listed
+        in the names parameter.
+ """
+ raise Exception('Cannot run abstract method.')
+
+# ------------------------------------------------------------------------------
+
+
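+# Sentinel used to distinguish "no default was provided" from an explicit default of None: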
+_NO_DEFAULT = object()
+
+
+class Field(object):
+ """Representation of the schema of a field in a record."""
+
+ def __init__(
+ self,
+ data_type,
+ name,
+ index,
+ has_default,
+ default=_NO_DEFAULT,
+ order=None,
+ doc=None,
+ other_props=None
+ ):
+ """Initializes a new Field object.
+
+ Args:
+ data_type: Avro schema of the field.
+ name: Name of the field.
+ index: 0-based position of the field.
+ has_default:
+ default:
+ order:
+ doc:
+ other_props:
+ """
+ if (not isinstance(name, _str)) or (not name):
+ raise SchemaParseException('Invalid record field name: %r.' % name)
+ if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS):
+ raise SchemaParseException('Invalid record field order: %r.' % order)
+
+ # All properties of this record field:
+ self._props = {}
+
+ self._has_default = has_default
+ if other_props:
+ self._props.update(other_props)
+
+ self._index = index
+ self._type = self._props['type'] = data_type
+ self._name = self._props['name'] = name
+
+ if has_default:
+ self._props['default'] = default
+
+ if order is not None:
+ self._props['order'] = order
+
+ if doc is not None:
+ self._props['doc'] = doc
+
+ @property
+ def type(self):
+ """Returns: the schema of this field."""
+ return self._type
+
+ @property
+ def name(self):
+ """Returns: this field name."""
+ return self._name
+
+ @property
+ def index(self):
+ """Returns: the 0-based index of this field in the record."""
+ return self._index
+
+ @property
+ def default(self):
+ return self._props['default']
+
+ @property
+ def has_default(self):
+ return self._has_default
+
+ @property
+ def order(self):
+ return self._props.get('order', None)
+
+ @property
+ def doc(self):
+ return self._props.get('doc', None)
+
+ @property
+ def props(self):
+ return self._props
+
+ @property
+ def other_props(self):
+ return filter_keys_out(items=self._props, keys=FIELD_RESERVED_PROPS)
+
+ def __str__(self):
+ return json.dumps(self.to_json())
+
+ def to_json(self, names=None):
+ if names is None:
+ names = Names()
+ to_dump = self.props.copy()
+ to_dump['type'] = self.type.to_json(names)
+ return to_dump
+
+ def __eq__(self, that):
+ to_cmp = json.loads(_str(self))
+ return to_cmp == json.loads(_str(that))
+
+
+# ------------------------------------------------------------------------------
+# Primitive Types
+
+
+class PrimitiveSchema(Schema):
+ """Schema of a primitive Avro type.
+
+ Valid primitive types are defined in PRIMITIVE_TYPES.
+ """
+
+ def __init__(self, data_type, other_props=None):
+ """Initializes a new schema object for the specified primitive type.
+
+ Args:
+ data_type: Type of the schema to construct. Must be primitive.
+ """
+ if data_type not in PRIMITIVE_TYPES:
+ raise AvroException('%r is not a valid primitive type.' % data_type)
+ super(PrimitiveSchema, self).__init__(data_type, other_props=other_props)
+
+ @property
+ def name(self):
+ """Returns: the simple name of this schema."""
+ # The name of a primitive type is the type itself.
+ return self.type
+
+ @property
+ def fullname(self):
+ """Returns: the fully qualified name of this schema."""
+ # The full name is the simple name for primitive schema.
+ return self.name
+
+ def to_json(self, names=None):
+ if len(self.props) == 1:
+ return self.fullname
+ return self.props
+
+ def __eq__(self, that):
+ return self.props == that.props
+
+
+# ------------------------------------------------------------------------------
+# Complex Types (non-recursive)
+
+
+class FixedSchema(NamedSchema):
+ def __init__(
+ self,
+ name,
+ namespace,
+ size,
+ names=None,
+ other_props=None,
+ ):
+ # Ensure valid ctor args
+ if not isinstance(size, int):
+ fail_msg = 'Fixed Schema requires a valid integer for size property.'
+ raise AvroException(fail_msg)
+
+ super(FixedSchema, self).__init__(
+ data_type=FIXED,
+ name=name,
+ namespace=namespace,
+ names=names,
+ other_props=other_props,
+ )
+ self._props['size'] = size
+
+ @property
+ def size(self):
+ """Returns: the size of this fixed schema, in bytes."""
+ return self._props['size']
+
+ def to_json(self, names=None):
+ if names is None:
+ names = Names()
+ if self.fullname in names.names:
+ return self.name_ref(names)
+ names.names[self.fullname] = self
+ return names.prune_namespace(self.props)
+
+ def __eq__(self, that):
+ return self.props == that.props
+
+
+# ------------------------------------------------------------------------------
+
+
+class EnumSchema(NamedSchema):
+ def __init__(
+ self,
+ name,
+ namespace,
+ symbols,
+ names=None,
+ doc=None,
+ other_props=None,
+ ):
+ """Initializes a new enumeration schema object.
+
+ Args:
+ name: Simple name of this enumeration.
+ namespace: Optional namespace.
+ symbols: Ordered list of symbols defined in this enumeration.
+ names:
+ doc:
+ other_props:
+ """
+ symbols = tuple(symbols)
+ symbol_set = frozenset(symbols)
+ if (len(symbol_set) != len(symbols)
+ or not all(map(lambda symbol: isinstance(symbol, _str), symbols))):
+ raise AvroException(
+ 'Invalid symbols for enum schema: %r.' % (symbols,))
+
+ super(EnumSchema, self).__init__(
+ data_type=ENUM,
+ name=name,
+ namespace=namespace,
+ names=names,
+ other_props=other_props,
+ )
+
+ self._props['symbols'] = symbols
+ if doc is not None:
+ self._props['doc'] = doc
+
+ @property
+ def symbols(self):
+ """Returns: the symbols defined in this enum."""
+ return self._props['symbols']
+
+ def to_json(self, names=None):
+ if names is None:
+ names = Names()
+ if self.fullname in names.names:
+ return self.name_ref(names)
+ names.names[self.fullname] = self
+ return names.prune_namespace(self.props)
+
+ def __eq__(self, that):
+ return self.props == that.props
+
+
+# ------------------------------------------------------------------------------
+# Complex Types (recursive)
+
+
+class ArraySchema(Schema):
+ """Schema of an array."""
+
+ def __init__(self, items, other_props=None):
+ """Initializes a new array schema object.
+
+ Args:
+ items: Avro schema of the array items.
+ other_props:
+ """
+ super(ArraySchema, self).__init__(
+ data_type=ARRAY,
+ other_props=other_props,
+ )
+ self._items_schema = items
+ self._props['items'] = items
+
+ @property
+ def items(self):
+ """Returns: the schema of the items in this array."""
+ return self._items_schema
+
+ def to_json(self, names=None):
+ if names is None:
+ names = Names()
+ to_dump = self.props.copy()
+ item_schema = self.items
+ to_dump['items'] = item_schema.to_json(names)
+ return to_dump
+
+ def __eq__(self, that):
+ to_cmp = json.loads(_str(self))
+ return to_cmp == json.loads(_str(that))
+
+
+# ------------------------------------------------------------------------------
+
+
+class MapSchema(Schema):
+ """Schema of a map."""
+
+ def __init__(self, values, other_props=None):
+ """Initializes a new map schema object.
+
+ Args:
+ values: Avro schema of the map values.
+ other_props:
+ """
+ super(MapSchema, self).__init__(
+ data_type=MAP,
+ other_props=other_props,
+ )
+ self._values_schema = values
+ self._props['values'] = values
+
+ @property
+ def values(self):
+ """Returns: the schema of the values in this map."""
+ return self._values_schema
+
+ def to_json(self, names=None):
+ if names is None:
+ names = Names()
+ to_dump = self.props.copy()
+ to_dump['values'] = self.values.to_json(names)
+ return to_dump
+
+ def __eq__(self, that):
+ to_cmp = json.loads(_str(self))
+ return to_cmp == json.loads(_str(that))
+
+
+# ------------------------------------------------------------------------------
+
+
+class UnionSchema(Schema):
+ """Schema of a union."""
+
+ def __init__(self, schemas):
+ """Initializes a new union schema object.
+
+ Args:
+ schemas: Ordered collection of schema branches in the union.
+ """
+ super(UnionSchema, self).__init__(data_type=UNION)
+ self._schemas = tuple(schemas)
+
+ # Validate the schema branches:
+
+ # All named schema names are unique:
+ named_branches = tuple(
+ filter(lambda schema: schema.type in NAMED_TYPES, self._schemas))
+ unique_names = frozenset(map(lambda schema: schema.fullname, named_branches))
+ if len(unique_names) != len(named_branches):
+ raise AvroException(
+ 'Invalid union branches with duplicate schema name:%s'
+ % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas)))
+
+ # Types are unique within unnamed schemas, and union is not allowed:
+ unnamed_branches = tuple(
+ filter(lambda schema: schema.type not in NAMED_TYPES, self._schemas))
+ unique_types = frozenset(map(lambda schema: schema.type, unnamed_branches))
+ if UNION in unique_types:
+ raise AvroException(
+ 'Invalid union branches contain other unions:%s'
+ % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas)))
+ if len(unique_types) != len(unnamed_branches):
+ raise AvroException(
+ 'Invalid union branches with duplicate type:%s'
+ % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas)))
+
+ @property
+ def schemas(self):
+ """Returns: the ordered list of schema branches in the union."""
+ return self._schemas
+
+ def to_json(self, names=None):
+ if names is None:
+ names = Names()
+ to_dump = []
+ for schema in self.schemas:
+ to_dump.append(schema.to_json(names))
+ return to_dump
+
+ def __eq__(self, that):
+ to_cmp = json.loads(_str(self))
+ return to_cmp == json.loads(_str(that))
+
+
+# ------------------------------------------------------------------------------
+
+
+class ErrorUnionSchema(UnionSchema):
+ """Schema representing the declared errors of a protocol message."""
+
+ def __init__(self, schemas):
+ """Initializes an error-union schema.
+
+ Args:
+            schemas: Collection of error schemas.
+ """
+ # Prepend "string" to handle system errors
+ schemas = [PrimitiveSchema(data_type=STRING)] + list(schemas)
+ super(ErrorUnionSchema, self).__init__(schemas=schemas)
+
+ def to_json(self, names=None):
+ if names is None:
+ names = Names()
+ to_dump = []
+ for schema in self.schemas:
+ # Don't print the system error schema
+ if schema.type == STRING:
+ continue
+ to_dump.append(schema.to_json(names))
+ return to_dump
+
+
+# ------------------------------------------------------------------------------
+
+
+class RecordSchema(NamedSchema):
+ """Schema of a record."""
+
+ @staticmethod
+ def _make_field(index, field_desc, names):
+ """Builds field schemas from a list of field JSON descriptors.
+
+ Args:
+ index: 0-based index of the field in the record.
+ field_desc: JSON descriptors of a record field.
+ Return:
+ The field schema.
+ """
+ field_schema = schema_from_json_data(
+ json_data=field_desc['type'],
+ names=names,
+ )
+ other_props = (
+ dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS)))
+ return Field(
+ data_type=field_schema,
+ name=field_desc['name'],
+ index=index,
+ has_default=('default' in field_desc),
+ default=field_desc.get('default', _NO_DEFAULT),
+ order=field_desc.get('order', None),
+ doc=field_desc.get('doc', None),
+ other_props=other_props,
+ )
+
+ @staticmethod
+ def make_field_list(field_desc_list, names):
+ """Builds field schemas from a list of field JSON descriptors.
+
+        Guarantees field name uniqueness.
+
+ Args:
+ field_desc_list: collection of field JSON descriptors.
+ names: Avro schema tracker.
+        Yields:
+ Field schemas.
+ """
+ for index, field_desc in enumerate(field_desc_list):
+ yield RecordSchema._make_field(index, field_desc, names)
+
+ @staticmethod
+ def _make_field_map(fields):
+ """Builds the field map.
+
+        Guarantees field name uniqueness.
+
+ Args:
+ fields: iterable of field schema.
+ Returns:
+ A map of field schemas, indexed by name.
+ """
+ field_map = {}
+ for field in fields:
+ if field.name in field_map:
+ raise SchemaParseException(
+ 'Duplicate record field name %r.' % field.name)
+ field_map[field.name] = field
+ return field_map
+
+ def __init__(
+ self,
+ name,
+ namespace,
+ fields=None,
+ make_fields=None,
+ names=None,
+ record_type=RECORD,
+ doc=None,
+ other_props=None
+ ):
+ """Initializes a new record schema object.
+
+ Args:
+ name: Name of the record (absolute or relative).
+ namespace: Optional namespace the record belongs to, if name is relative.
+ fields: collection of fields to add to this record.
+ Exactly one of fields or make_fields must be specified.
+ make_fields: function creating the fields that belong to the record.
+ The function signature is: make_fields(names) -> ordered field list.
+ Exactly one of fields or make_fields must be specified.
+ names:
+ record_type: Type of the record: one of RECORD, ERROR or REQUEST.
+ Protocol requests are not named.
+ doc:
+ other_props:
+ """
+ if record_type == REQUEST:
+ # Protocol requests are not named:
+ super(RecordSchema, self).__init__(
+ data_type=REQUEST,
+ other_props=other_props,
+ )
+ elif record_type in [RECORD, ERROR]:
+ # Register this record name in the tracker:
+ super(RecordSchema, self).__init__(
+ data_type=record_type,
+ name=name,
+ namespace=namespace,
+ names=names,
+ other_props=other_props,
+ )
+ else:
+ raise SchemaParseException(
+ 'Invalid record type: %r.' % record_type)
+
+ if record_type in [RECORD, ERROR]:
+ avro_name = names.get_name(name=name, namespace=namespace)
+ nested_names = names.new_with_default_namespace(namespace=avro_name.namespace)
+ elif record_type == REQUEST:
+ # Protocol request has no name: no need to change default namespace:
+ nested_names = names
+
+ if fields is None:
+ fields = make_fields(names=nested_names)
+ else:
+ assert make_fields is None
+ self._fields = tuple(fields)
+
+ self._field_map = RecordSchema._make_field_map(self._fields)
+
+ self._props['fields'] = fields
+ if doc is not None:
+ self._props['doc'] = doc
+
+ @property
+ def fields(self):
+ """Returns: the field schemas, as an ordered tuple."""
+ return self._fields
+
+ @property
+ def field_map(self):
+ """Returns: a read-only map of the field schemas index by field names."""
+ return self._field_map
+
+ def to_json(self, names=None):
+ if names is None:
+ names = Names()
+ # Request records don't have names
+ if self.type == REQUEST:
+ return [f.to_json(names) for f in self.fields]
+
+ if self.fullname in names.names:
+ return self.name_ref(names)
+ names.names[self.fullname] = self
+
+ to_dump = names.prune_namespace(self.props.copy())
+ to_dump['fields'] = [f.to_json(names) for f in self.fields]
+ return to_dump
+
+ def __eq__(self, that):
+ to_cmp = json.loads(_str(self))
+ return to_cmp == json.loads(_str(that))
+
+
+# ------------------------------------------------------------------------------
+# Module functions
+
+
+def filter_keys_out(items, keys):
+ """Filters a collection of (key, value) items.
+
+ Exclude any item whose key belongs to keys.
+
+ Args:
+ items: Dictionary of items to filter the keys out of.
+ keys: Keys to filter out.
+ Yields:
+ Filtered items.
+ """
+ for key, value in items.items():
+ if key in keys:
+ continue
+ yield key, value
+
+
+# ------------------------------------------------------------------------------
+
+
+def _schema_from_json_string(json_string, names):
+ if json_string in PRIMITIVE_TYPES:
+ return PrimitiveSchema(data_type=json_string)
+
+ # Look for a known named schema:
+ schema = names.get_schema(name=json_string)
+ if schema is None:
+ raise SchemaParseException(
+ 'Unknown named schema %r, known names: %r.'
+ % (json_string, sorted(names.names)))
+ return schema
+
+
+def _schema_from_json_array(json_array, names):
+ def MakeSchema(desc):
+ return schema_from_json_data(json_data=desc, names=names)
+
+ return UnionSchema(map(MakeSchema, json_array))
+
+
+def _schema_from_json_object(json_object, names):
+ data_type = json_object.get('type')
+ if data_type is None:
+ raise SchemaParseException(
+ 'Avro schema JSON descriptor has no "type" property: %r' % json_object)
+
+ other_props = dict(
+ filter_keys_out(items=json_object, keys=SCHEMA_RESERVED_PROPS))
+
+ if data_type in PRIMITIVE_TYPES:
+ # FIXME should not ignore other properties
+ result = PrimitiveSchema(data_type, other_props=other_props)
+
+ elif data_type in NAMED_TYPES:
+ name = json_object.get('name')
+ namespace = json_object.get('namespace', names.default_namespace)
+ if data_type == FIXED:
+ size = json_object.get('size')
+ result = FixedSchema(name, namespace, size, names, other_props)
+ elif data_type == ENUM:
+ symbols = json_object.get('symbols')
+ doc = json_object.get('doc')
+ result = EnumSchema(name, namespace, symbols, names, doc, other_props)
+
+ elif data_type in [RECORD, ERROR]:
+ field_desc_list = json_object.get('fields', ())
+
+ def MakeFields(names):
+ return tuple(RecordSchema.make_field_list(field_desc_list, names))
+
+ result = RecordSchema(
+ name=name,
+ namespace=namespace,
+ make_fields=MakeFields,
+ names=names,
+ record_type=data_type,
+ doc=json_object.get('doc'),
+ other_props=other_props,
+ )
+ else:
+ raise Exception('Internal error: unknown type %r.' % data_type)
+
+ elif data_type in VALID_TYPES:
+ # Unnamed, non-primitive Avro type:
+
+ if data_type == ARRAY:
+ items_desc = json_object.get('items')
+ if items_desc is None:
+ raise SchemaParseException(
+ 'Invalid array schema descriptor with no "items" : %r.'
+ % json_object)
+ result = ArraySchema(
+ items=schema_from_json_data(items_desc, names),
+ other_props=other_props,
+ )
+
+ elif data_type == MAP:
+ values_desc = json_object.get('values')
+ if values_desc is None:
+ raise SchemaParseException(
+ 'Invalid map schema descriptor with no "values" : %r.'
+ % json_object)
+ result = MapSchema(
+ values=schema_from_json_data(values_desc, names=names),
+ other_props=other_props,
+ )
+
+ elif data_type == ERROR_UNION:
+ error_desc_list = json_object.get('declared_errors')
+ assert error_desc_list is not None
+ error_schemas = map(
+ lambda desc: schema_from_json_data(desc, names=names),
+ error_desc_list)
+ result = ErrorUnionSchema(schemas=error_schemas)
+
+ else:
+ raise Exception('Internal error: unknown type %r.' % data_type)
+ else:
+ raise SchemaParseException(
+ 'Invalid JSON descriptor for an Avro schema: %r' % json_object)
+ return result
+
+
+# Parsers for the JSON data types:
+_JSONDataParserTypeMap = {
+ _str: _schema_from_json_string,
+ list: _schema_from_json_array,
+ dict: _schema_from_json_object,
+}
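+# For example, the already-parsed JSON value 'int' (a str) is handled by
+# _schema_from_json_string, while a dict such as {"type": "array", "items": "string"}
+# is handled by _schema_from_json_object.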
+
+
+def schema_from_json_data(json_data, names=None):
+ """Builds an Avro Schema from its JSON descriptor.
+
+ Args:
+ json_data: JSON data representing the descriptor of the Avro schema.
+ names: Optional tracker for Avro named schemas.
+ Returns:
+ The Avro schema parsed from the JSON descriptor.
+ Raises:
+ SchemaParseException: if the descriptor is invalid.
+ """
+ if names is None:
+ names = Names()
+
+ # Select the appropriate parser based on the JSON data type:
+ parser = _JSONDataParserTypeMap.get(type(json_data))
+ if parser is None:
+ raise SchemaParseException(
+ 'Invalid JSON descriptor for an Avro schema: %r.' % json_data)
+ return parser(json_data, names=names)
+
+
+# ------------------------------------------------------------------------------
+
+
+def parse(json_string):
+ """Constructs a Schema from its JSON descriptor in text form.
+
+ Args:
+ json_string: String representation of the JSON descriptor of the schema.
+ Returns:
+ The parsed schema.
+ Raises:
+ SchemaParseException: on JSON parsing error,
+ or if the JSON descriptor is invalid.
+ """
+ try:
+ json_data = json.loads(json_string)
+ except Exception as exn:
+ raise SchemaParseException(
+ 'Error parsing schema from JSON: %r. '
+ 'Error message: %r.'
+ % (json_string, exn))
+
+ # Initialize the names object
+ names = Names()
+
+ # construct the Avro Schema object
+ return schema_from_json_data(json_data, names)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/base_client.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/base_client.py
new file mode 100644
index 00000000000..fae8bd74b8b
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/base_client.py
@@ -0,0 +1,443 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import ( # pylint: disable=unused-import
+ Union,
+ Optional,
+ Any,
+ Iterable,
+ Dict,
+ List,
+ Type,
+ Tuple,
+ TYPE_CHECKING,
+)
+import logging
+
+try:
+ from urllib.parse import parse_qs, quote
+except ImportError:
+ from urlparse import parse_qs # type: ignore
+ from urllib2 import quote # type: ignore
+
+import six
+
+from azure.core.configuration import Configuration
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline import Pipeline
+from azure.core.pipeline.transport import RequestsTransport, HttpTransport
+from azure.core.pipeline.policies import (
+ RedirectPolicy,
+ ContentDecodePolicy,
+ BearerTokenCredentialPolicy,
+ ProxyPolicy,
+ DistributedTracingPolicy,
+ HttpLoggingPolicy,
+ UserAgentPolicy
+)
+
+from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT
+from .models import LocationMode
+from .authentication import SharedKeyCredentialPolicy
+from .shared_access_signature import QueryStringConstants
+from .policies import (
+ StorageHeadersPolicy,
+ StorageContentValidation,
+ StorageRequestHook,
+ StorageResponseHook,
+ StorageLoggingPolicy,
+ StorageHosts,
+ QueueMessagePolicy,
+ ExponentialRetry,
+)
+from .._version import VERSION
+from .._generated.models import StorageErrorException
+from .response_handlers import process_storage_error, PartialBatchErrorException
+
+
+_LOGGER = logging.getLogger(__name__)
+_SERVICE_PARAMS = {
+ "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"},
+ "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"},
+ "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"},
+ "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"},
+}
+
+
+class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes
+ def __init__(
+ self,
+ parsed_url, # type: Any
+ service, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY)
+ self._hosts = kwargs.get("_hosts")
+ self.scheme = parsed_url.scheme
+
+ if service not in ["blob", "queue", "file-share", "dfs"]:
+ raise ValueError("Invalid service: {}".format(service))
+ service_name = service.split('-')[0]
+ account = parsed_url.netloc.split(".{}.core.".format(service_name))
+
+ self.account_name = account[0] if len(account) > 1 else None
+        if not self.account_name and (parsed_url.netloc.startswith("localhost") or
+                                      parsed_url.netloc.startswith("127.0.0.1")):
+ self.account_name = parsed_url.path.strip("/")
+
+ self.credential = _format_shared_key_credential(self.account_name, credential)
+ if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"):
+ raise ValueError("Token credential is only supported with HTTPS.")
+
+ secondary_hostname = None
+ if hasattr(self.credential, "account_name"):
+ self.account_name = self.credential.account_name
+ secondary_hostname = "{}-secondary.{}.{}".format(
+ self.credential.account_name, service_name, SERVICE_HOST_BASE)
+
+ if not self._hosts:
+ if len(account) > 1:
+ secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary")
+ if kwargs.get("secondary_hostname"):
+ secondary_hostname = kwargs["secondary_hostname"]
+ primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/')
+ self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname}
+
+ self.require_encryption = kwargs.get("require_encryption", False)
+ self.key_encryption_key = kwargs.get("key_encryption_key")
+ self.key_resolver_function = kwargs.get("key_resolver_function")
+ self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs)
+
+ def __enter__(self):
+ self._client.__enter__()
+ return self
+
+ def __exit__(self, *args):
+ self._client.__exit__(*args)
+
+ def close(self):
+ """ This method is to close the sockets opened by the client.
+ It need not be used when using with a context manager.
+ """
+ self._client.close()
+
+ @property
+ def url(self):
+ """The full endpoint URL to this entity, including SAS token if used.
+
+ This could be either the primary endpoint,
+ or the secondary endpoint depending on the current :func:`location_mode`.
+ """
+ return self._format_url(self._hosts[self._location_mode])
+
+ @property
+ def primary_endpoint(self):
+ """The full primary endpoint URL.
+
+ :type: str
+ """
+ return self._format_url(self._hosts[LocationMode.PRIMARY])
+
+ @property
+ def primary_hostname(self):
+ """The hostname of the primary endpoint.
+
+ :type: str
+ """
+ return self._hosts[LocationMode.PRIMARY]
+
+ @property
+ def secondary_endpoint(self):
+ """The full secondary endpoint URL if configured.
+
+ If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
+ `secondary_hostname` keyword argument on instantiation.
+
+ :type: str
+ :raise ValueError:
+ """
+ if not self._hosts[LocationMode.SECONDARY]:
+ raise ValueError("No secondary host configured.")
+ return self._format_url(self._hosts[LocationMode.SECONDARY])
+
+ @property
+ def secondary_hostname(self):
+ """The hostname of the secondary endpoint.
+
+ If not available this will be None. To explicitly specify a secondary hostname, use the optional
+ `secondary_hostname` keyword argument on instantiation.
+
+ :type: str or None
+ """
+ return self._hosts[LocationMode.SECONDARY]
+
+ @property
+ def location_mode(self):
+ """The location mode that the client is currently using.
+
+ By default this will be "primary". Options include "primary" and "secondary".
+
+ :type: str
+ """
+
+ return self._location_mode
+
+ @location_mode.setter
+ def location_mode(self, value):
+ if self._hosts.get(value):
+ self._location_mode = value
+ self._client._config.url = self.url # pylint: disable=protected-access
+ else:
+ raise ValueError("No host URL for location mode: {}".format(value))
+
+ @property
+ def api_version(self):
+ """The version of the Storage API used for requests.
+
+ :type: str
+ """
+ return self._client._config.version # pylint: disable=protected-access
+
+ def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None):
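+        # Builds the query string that is appended to resource URLs. Illustrative
+        # example (made-up values): with sas_token "sv=2020-02-10&sig=abc" and no
+        # other credential, this returns ("?sv=2020-02-10&sig=abc", None).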
+ query_str = "?"
+ if snapshot:
+ query_str += "snapshot={}&".format(self.snapshot)
+ if share_snapshot:
+ query_str += "sharesnapshot={}&".format(self.snapshot)
+ if sas_token and not credential:
+ query_str += sas_token
+ elif is_credential_sastoken(credential):
+ query_str += credential.lstrip("?")
+ credential = None
+ return query_str.rstrip("?&"), credential
+
+ def _create_pipeline(self, credential, **kwargs):
+ # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+ self._credential_policy = None
+ if hasattr(credential, "get_token"):
+ self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+ elif isinstance(credential, SharedKeyCredentialPolicy):
+ self._credential_policy = credential
+ elif credential is not None:
+ raise TypeError("Unsupported credential: {}".format(credential))
+
+ config = kwargs.get("_configuration") or create_configuration(**kwargs)
+ if kwargs.get("_pipeline"):
+ return config, kwargs["_pipeline"]
+ config.transport = kwargs.get("transport") # type: ignore
+ kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+ kwargs.setdefault("read_timeout", READ_TIMEOUT)
+ if not config.transport:
+ config.transport = RequestsTransport(**kwargs)
+ policies = [
+ QueueMessagePolicy(),
+ config.proxy_policy,
+ config.user_agent_policy,
+ StorageContentValidation(),
+ ContentDecodePolicy(response_encoding="utf-8"),
+ RedirectPolicy(**kwargs),
+ StorageHosts(hosts=self._hosts, **kwargs),
+ config.retry_policy,
+ config.headers_policy,
+ StorageRequestHook(**kwargs),
+ self._credential_policy,
+ config.logging_policy,
+ StorageResponseHook(**kwargs),
+ DistributedTracingPolicy(**kwargs),
+ HttpLoggingPolicy(**kwargs)
+ ]
+ if kwargs.get("_additional_pipeline_policies"):
+ policies = policies + kwargs.get("_additional_pipeline_policies")
+ return config, Pipeline(config.transport, policies=policies)
+
+ def _batch_send(
+ self, *reqs, # type: HttpRequest
+ **kwargs
+ ):
+ """Given a series of request, do a Storage batch call.
+ """
+ # Pop it here, so requests doesn't feel bad about additional kwarg
+ raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+ request = self._client._client.post( # pylint: disable=protected-access
+ url='{}://{}/?comp=batch{}{}'.format(
+ self.scheme,
+ self.primary_hostname,
+ kwargs.pop('sas', ""),
+ kwargs.pop('timeout', "")
+ ),
+ headers={
+ 'x-ms-version': self.api_version
+ }
+ )
+
+ policies = [StorageHeadersPolicy()]
+ if self._credential_policy:
+ policies.append(self._credential_policy)
+
+ request.set_multipart_mixed(
+ *reqs,
+ policies=policies,
+ enforce_https=False
+ )
+
+ pipeline_response = self._pipeline.run(
+ request, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ try:
+ if response.status_code not in [202]:
+ raise HttpResponseError(response=response)
+ parts = response.parts()
+ if raise_on_any_failure:
+ parts = list(response.parts())
+ if any(p for p in parts if not 200 <= p.status_code < 300):
+ error = PartialBatchErrorException(
+ message="There is a partial failure in the batch operation.",
+ response=response, parts=parts
+ )
+ raise error
+ return iter(parts)
+ return parts
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+class TransportWrapper(HttpTransport):
+ """Wrapper class that ensures that an inner client created
+ by a `get_client` method does not close the outer transport for the parent
+ when used in a context manager.
+ """
+ def __init__(self, transport):
+ self._transport = transport
+
+ def send(self, request, **kwargs):
+ return self._transport.send(request, **kwargs)
+
+ def open(self):
+ pass
+
+ def close(self):
+ pass
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, *args): # pylint: disable=arguments-differ
+ pass
+
+
+def _format_shared_key_credential(account_name, credential):
+ if isinstance(credential, six.string_types):
+ if not account_name:
+ raise ValueError("Unable to determine account name for shared key credential.")
+ credential = {"account_name": account_name, "account_key": credential}
+ if isinstance(credential, dict):
+ if "account_name" not in credential:
+ raise ValueError("Shared key credential missing 'account_name")
+ if "account_key" not in credential:
+ raise ValueError("Shared key credential missing 'account_key")
+ return SharedKeyCredentialPolicy(**credential)
+ return credential
+
+
+def parse_connection_str(conn_str, credential, service):
+ conn_str = conn_str.rstrip(";")
+ conn_settings = [s.split("=", 1) for s in conn_str.split(";")]
+ if any(len(tup) != 2 for tup in conn_settings):
+ raise ValueError("Connection string is either blank or malformed.")
+ conn_settings = dict(conn_settings)
+ endpoints = _SERVICE_PARAMS[service]
+ primary = None
+ secondary = None
+ if not credential:
+ try:
+ credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]}
+ except KeyError:
+ credential = conn_settings.get("SharedAccessSignature")
+ if endpoints["primary"] in conn_settings:
+ primary = conn_settings[endpoints["primary"]]
+ if endpoints["secondary"] in conn_settings:
+ secondary = conn_settings[endpoints["secondary"]]
+ else:
+ if endpoints["secondary"] in conn_settings:
+ raise ValueError("Connection string specifies only secondary endpoint.")
+ try:
+ primary = "{}://{}.{}.{}".format(
+ conn_settings["DefaultEndpointsProtocol"],
+ conn_settings["AccountName"],
+ service,
+ conn_settings["EndpointSuffix"],
+ )
+ secondary = "{}-secondary.{}.{}".format(
+ conn_settings["AccountName"], service, conn_settings["EndpointSuffix"]
+ )
+ except KeyError:
+ pass
+
+ if not primary:
+ try:
+ primary = "https://{}.{}.{}".format(
+ conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE)
+ )
+ except KeyError:
+ raise ValueError("Connection string missing required connection details.")
+ return primary, secondary, credential
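+# Illustrative sketch (made-up values): for the connection string
+#   "DefaultEndpointsProtocol=https;AccountName=myadls;AccountKey=<key>;EndpointSuffix=core.windows.net"
+# and service "blob", parse_connection_str returns
+#   ("https://myadls.blob.core.windows.net",
+#    "myadls-secondary.blob.core.windows.net",
+#    {"account_name": "myadls", "account_key": "<key>"})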
+
+
+def create_configuration(**kwargs):
+ # type: (**Any) -> Configuration
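+    # 'storage_sdk' (e.g. "blob") is a required keyword used in the user-agent string;
+    # the size and retry settings below can each be overridden via keyword arguments,
+    # for example create_configuration(storage_sdk="blob", max_block_size=8 * 1024 * 1024).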
+ config = Configuration(**kwargs)
+ config.headers_policy = StorageHeadersPolicy(**kwargs)
+ config.user_agent_policy = UserAgentPolicy(
+ sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs)
+ config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
+ config.logging_policy = StorageLoggingPolicy(**kwargs)
+ config.proxy_policy = ProxyPolicy(**kwargs)
+
+ # Storage settings
+ config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024)
+ config.copy_polling_interval = 15
+
+ # Block blob uploads
+ config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024)
+ config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1)
+ config.use_byte_buffer = kwargs.get("use_byte_buffer", False)
+
+ # Page blob uploads
+ config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024)
+
+ # Blob downloads
+ config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024)
+ config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024)
+
+ # File uploads
+ config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024)
+ return config
+
+
+def parse_query(query_str):
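+    # Splits a URL query string into (snapshot, sas_token). Illustrative example
+    # (made-up values): parse_query("snapshot=2021-05-20&sv=2020-02-10&sig=abc")
+    # returns roughly ("2021-05-20", "sv=2020-02-10&sig=abc").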
+ sas_values = QueryStringConstants.to_list()
+ parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()}
+ sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values]
+ sas_token = None
+ if sas_params:
+ sas_token = "&".join(sas_params)
+
+ snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot")
+ return snapshot, sas_token
+
+
+def is_credential_sastoken(credential):
+ if not credential or not isinstance(credential, six.string_types):
+ return False
+
+ sas_values = QueryStringConstants.to_list()
+ parsed_query = parse_qs(credential.lstrip("?"))
+ if parsed_query and all([k in sas_values for k in parsed_query.keys()]):
+ return True
+ return False
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/base_client_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/base_client_async.py
new file mode 100644
index 00000000000..1fec883b506
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/base_client_async.py
@@ -0,0 +1,185 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+ TYPE_CHECKING
+)
+import logging
+from azure.core.pipeline import AsyncPipeline
+from azure.core.async_paging import AsyncList
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline.policies import (
+ ContentDecodePolicy,
+ AsyncBearerTokenCredentialPolicy,
+ AsyncRedirectPolicy,
+ DistributedTracingPolicy,
+ HttpLoggingPolicy,
+)
+from azure.core.pipeline.transport import AsyncHttpTransport
+
+from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT
+from .authentication import SharedKeyCredentialPolicy
+from .base_client import create_configuration
+from .policies import (
+ StorageContentValidation,
+ StorageRequestHook,
+ StorageHosts,
+ StorageHeadersPolicy,
+ QueueMessagePolicy
+)
+from .policies_async import AsyncStorageResponseHook
+
+from .._generated.models import StorageErrorException
+from .response_handlers import process_storage_error, PartialBatchErrorException
+
+if TYPE_CHECKING:
+ from azure.core.pipeline import Pipeline
+ from azure.core.pipeline.transport import HttpRequest
+ from azure.core.configuration import Configuration
+_LOGGER = logging.getLogger(__name__)
+
+
+class AsyncStorageAccountHostsMixin(object):
+
+ def __enter__(self):
+ raise TypeError("Async client only supports 'async with'.")
+
+ def __exit__(self, *args):
+ pass
+
+ async def __aenter__(self):
+ await self._client.__aenter__()
+ return self
+
+ async def __aexit__(self, *args):
+ await self._client.__aexit__(*args)
+
+ async def close(self):
+ """ This method is to close the sockets opened by the client.
+ It need not be used when using with a context manager.
+ """
+ await self._client.close()
+
+ def _create_pipeline(self, credential, **kwargs):
+ # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+ self._credential_policy = None
+ if hasattr(credential, 'get_token'):
+ self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+ elif isinstance(credential, SharedKeyCredentialPolicy):
+ self._credential_policy = credential
+ elif credential is not None:
+ raise TypeError("Unsupported credential: {}".format(credential))
+ config = kwargs.get('_configuration') or create_configuration(**kwargs)
+ if kwargs.get('_pipeline'):
+ return config, kwargs['_pipeline']
+ config.transport = kwargs.get('transport') # type: ignore
+ kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+ kwargs.setdefault("read_timeout", READ_TIMEOUT)
+ if not config.transport:
+ try:
+ from azure.core.pipeline.transport import AioHttpTransport
+ except ImportError:
+ raise ImportError("Unable to create async transport. Please check aiohttp is installed.")
+ config.transport = AioHttpTransport(**kwargs)
+ policies = [
+ QueueMessagePolicy(),
+ config.headers_policy,
+ config.proxy_policy,
+ config.user_agent_policy,
+ StorageContentValidation(),
+ StorageRequestHook(**kwargs),
+ self._credential_policy,
+ ContentDecodePolicy(response_encoding="utf-8"),
+ AsyncRedirectPolicy(**kwargs),
+ StorageHosts(hosts=self._hosts, **kwargs), # type: ignore
+ config.retry_policy,
+ config.logging_policy,
+ AsyncStorageResponseHook(**kwargs),
+ DistributedTracingPolicy(**kwargs),
+ HttpLoggingPolicy(**kwargs),
+ ]
+ if kwargs.get("_additional_pipeline_policies"):
+ policies = policies + kwargs.get("_additional_pipeline_policies")
+ return config, AsyncPipeline(config.transport, policies=policies)
+
+ async def _batch_send(
+ self, *reqs: 'HttpRequest',
+ **kwargs
+ ):
+ """Given a series of request, do a Storage batch call.
+ """
+ # Pop it here, so requests doesn't feel bad about additional kwarg
+ raise_on_any_failure = kwargs.pop("raise_on_any_failure", True)
+ request = self._client._client.post( # pylint: disable=protected-access
+ url='{}://{}/?comp=batch{}{}'.format(
+ self.scheme,
+ self.primary_hostname,
+                kwargs.pop('sas', ""),
+                kwargs.pop('timeout', "")
+ ),
+ headers={
+ 'x-ms-version': self.api_version
+ }
+ )
+
+ policies = [StorageHeadersPolicy()]
+ if self._credential_policy:
+ policies.append(self._credential_policy)
+
+ request.set_multipart_mixed(
+ *reqs,
+ policies=policies,
+ enforce_https=False
+ )
+
+ pipeline_response = await self._pipeline.run(
+ request, **kwargs
+ )
+ response = pipeline_response.http_response
+
+ try:
+ if response.status_code not in [202]:
+ raise HttpResponseError(response=response)
+ parts = response.parts() # Return an AsyncIterator
+ if raise_on_any_failure:
+ parts_list = []
+ async for part in parts:
+ parts_list.append(part)
+ if any(p for p in parts_list if not 200 <= p.status_code < 300):
+ error = PartialBatchErrorException(
+ message="There is a partial failure in the batch operation.",
+ response=response, parts=parts_list
+ )
+ raise error
+ return AsyncList(parts_list)
+ return parts
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+
+class AsyncTransportWrapper(AsyncHttpTransport):
+ """Wrapper class that ensures that an inner client created
+ by a `get_client` method does not close the outer transport for the parent
+ when used in a context manager.
+ """
+ def __init__(self, async_transport):
+ self._transport = async_transport
+
+ async def send(self, request, **kwargs):
+ return await self._transport.send(request, **kwargs)
+
+ async def open(self):
+ pass
+
+ async def close(self):
+ pass
+
+ async def __aenter__(self):
+ pass
+
+ async def __aexit__(self, *args): # pylint: disable=arguments-differ
+ pass
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/constants.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/constants.py
new file mode 100644
index 00000000000..f67ea29cc13
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/constants.py
@@ -0,0 +1,27 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import sys
+from .._generated.version import VERSION
+
+
+X_MS_VERSION = VERSION
+
+# Socket timeout in seconds
+CONNECTION_TIMEOUT = 20
+READ_TIMEOUT = 20
+
+# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned)
+# The socket timeout is now the maximum total duration to send all data.
+if sys.version_info >= (3, 5):
+ # the timeout to connect is 20 seconds, and the read timeout is 80000 seconds
+ # the 80000 seconds was calculated with:
+ # 4000MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed)
+ READ_TIMEOUT = 80000
+
+STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default"
+
+SERVICE_HOST_BASE = 'core.windows.net'
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/encryption.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/encryption.py
new file mode 100644
index 00000000000..62607cc0cf8
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/encryption.py
@@ -0,0 +1,542 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import os
+from os import urandom
+from json import (
+ dumps,
+ loads,
+)
+from collections import OrderedDict
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.ciphers import Cipher
+from cryptography.hazmat.primitives.ciphers.algorithms import AES
+from cryptography.hazmat.primitives.ciphers.modes import CBC
+from cryptography.hazmat.primitives.padding import PKCS7
+
+from azure.core.exceptions import HttpResponseError
+
+from .._version import VERSION
+from . import encode_base64, decode_base64_to_bytes
+
+
+_ENCRYPTION_PROTOCOL_V1 = '1.0'
+_ERROR_OBJECT_INVALID = \
+ '{0} does not define a complete interface. Value of {1} is either missing or invalid.'
+
+
+def _validate_not_none(param_name, param):
+ if param is None:
+ raise ValueError('{0} should not be None.'.format(param_name))
+
+
+def _validate_key_encryption_key_wrap(kek):
+ # Note that None is not callable and so will fail the second clause of each check.
+ if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key):
+ raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key'))
+ if not hasattr(kek, 'get_kid') or not callable(kek.get_kid):
+ raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
+ if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm):
+ raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm'))
+
+
+class _EncryptionAlgorithm(object):
+ '''
+ Specifies which client encryption algorithm is used.
+ '''
+ AES_CBC_256 = 'AES_CBC_256'
+
+
+class _WrappedContentKey:
+ '''
+ Represents the envelope key details stored on the service.
+ '''
+
+ def __init__(self, algorithm, encrypted_key, key_id):
+ '''
+ :param str algorithm:
+ The algorithm used for wrapping.
+ :param bytes encrypted_key:
+ The encrypted content-encryption-key.
+ :param str key_id:
+ The key-encryption-key identifier string.
+ '''
+
+ _validate_not_none('algorithm', algorithm)
+ _validate_not_none('encrypted_key', encrypted_key)
+ _validate_not_none('key_id', key_id)
+
+ self.algorithm = algorithm
+ self.encrypted_key = encrypted_key
+ self.key_id = key_id
+
+
+class _EncryptionAgent:
+ '''
+ Represents the encryption agent stored on the service.
+ It consists of the encryption protocol version and encryption algorithm used.
+ '''
+
+ def __init__(self, encryption_algorithm, protocol):
+ '''
+ :param _EncryptionAlgorithm encryption_algorithm:
+ The algorithm used for encrypting the message contents.
+ :param str protocol:
+ The protocol version used for encryption.
+ '''
+
+ _validate_not_none('encryption_algorithm', encryption_algorithm)
+ _validate_not_none('protocol', protocol)
+
+ self.encryption_algorithm = str(encryption_algorithm)
+ self.protocol = protocol
+
+
+class _EncryptionData:
+ '''
+ Represents the encryption data that is stored on the service.
+ '''
+
+ def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key,
+ key_wrapping_metadata):
+ '''
+ :param bytes content_encryption_IV:
+ The content encryption initialization vector.
+ :param _EncryptionAgent encryption_agent:
+ The encryption agent.
+ :param _WrappedContentKey wrapped_content_key:
+ An object that stores the wrapping algorithm, the key identifier,
+ and the encrypted key bytes.
+ :param dict key_wrapping_metadata:
+ A dict containing metadata related to the key wrapping.
+ '''
+
+ _validate_not_none('content_encryption_IV', content_encryption_IV)
+ _validate_not_none('encryption_agent', encryption_agent)
+ _validate_not_none('wrapped_content_key', wrapped_content_key)
+
+ self.content_encryption_IV = content_encryption_IV
+ self.encryption_agent = encryption_agent
+ self.wrapped_content_key = wrapped_content_key
+ self.key_wrapping_metadata = key_wrapping_metadata
+
+
+def _generate_encryption_data_dict(kek, cek, iv):
+ '''
+ Generates and returns the encryption metadata as a dict.
+
+ :param object kek: The key encryption key. See calling functions for more information.
+ :param bytes cek: The content encryption key.
+ :param bytes iv: The initialization vector.
+ :return: A dict containing all the encryption metadata.
+ :rtype: dict
+ '''
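+    # The resulting dict serializes to JSON roughly as (illustrative values):
+    #   {"WrappedContentKey": {"KeyId": "...", "EncryptedKey": "<base64>", "Algorithm": "..."},
+    #    "EncryptionAgent": {"Protocol": "1.0", "EncryptionAlgorithm": "AES_CBC_256"},
+    #    "ContentEncryptionIV": "<base64>",
+    #    "KeyWrappingMetadata": {"EncryptionLibrary": "Python <version>"}}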
+ # Encrypt the cek.
+ wrapped_cek = kek.wrap_key(cek)
+
+ # Build the encryption_data dict.
+ # Use OrderedDict to comply with Java's ordering requirement.
+ wrapped_content_key = OrderedDict()
+ wrapped_content_key['KeyId'] = kek.get_kid()
+ wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek)
+ wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm()
+
+ encryption_agent = OrderedDict()
+ encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1
+ encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256
+
+ encryption_data_dict = OrderedDict()
+ encryption_data_dict['WrappedContentKey'] = wrapped_content_key
+ encryption_data_dict['EncryptionAgent'] = encryption_agent
+ encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv)
+ encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION}
+
+ return encryption_data_dict
+
+
+def _dict_to_encryption_data(encryption_data_dict):
+ '''
+ Converts the specified dictionary to an EncryptionData object for
+ eventual use in decryption.
+
+ :param dict encryption_data_dict:
+ The dictionary containing the encryption data.
+ :return: an _EncryptionData object built from the dictionary.
+ :rtype: _EncryptionData
+ '''
+ try:
+ if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1:
+ raise ValueError("Unsupported encryption version.")
+ except KeyError:
+ raise ValueError("Unsupported encryption version.")
+ wrapped_content_key = encryption_data_dict['WrappedContentKey']
+ wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
+ decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
+ wrapped_content_key['KeyId'])
+
+ encryption_agent = encryption_data_dict['EncryptionAgent']
+ encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
+ encryption_agent['Protocol'])
+
+ if 'KeyWrappingMetadata' in encryption_data_dict:
+ key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
+ else:
+ key_wrapping_metadata = None
+
+ encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
+ encryption_agent,
+ wrapped_content_key,
+ key_wrapping_metadata)
+
+ return encryption_data
+
+
+def _generate_AES_CBC_cipher(cek, iv):
+ '''
+ Generates and returns an encryption cipher for AES CBC using the given cek and iv.
+
+ :param bytes[] cek: The content encryption key for the cipher.
+ :param bytes[] iv: The initialization vector for the cipher.
+ :return: A cipher for encrypting in AES256 CBC.
+ :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
+ '''
+
+ backend = default_backend()
+ algorithm = AES(cek)
+ mode = CBC(iv)
+ return Cipher(algorithm, mode, backend)
+
+
+def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None):
+ '''
+ Extracts and returns the content_encryption_key stored in the encryption_data object
+ and performs necessary validation on all parameters.
+ :param _EncryptionData encryption_data:
+ The encryption metadata of the retrieved value.
+ :param obj key_encryption_key:
+ The key_encryption_key used to unwrap the cek. Please refer to high-level service object
+ instance variables for more details.
+ :param func key_resolver:
+ A function used that, given a key_id, will return a key_encryption_key. Please refer
+ to high-level service object instance variables for more details.
+ :return: the content_encryption_key stored in the encryption_data object.
+ :rtype: bytes[]
+ '''
+
+ _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
+ _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
+
+ if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol:
+ raise ValueError('Encryption version is not supported.')
+
+ content_encryption_key = None
+
+ # If the resolver exists, give priority to the key it finds.
+ if key_resolver is not None:
+ key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
+
+ _validate_not_none('key_encryption_key', key_encryption_key)
+ if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid):
+ raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
+ if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key):
+ raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
+ if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid():
+ raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.')
+ # Will throw an exception if the specified algorithm is not supported.
+ content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
+ encryption_data.wrapped_content_key.algorithm)
+ _validate_not_none('content_encryption_key', content_encryption_key)
+
+ return content_encryption_key
+
+
+def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
+ '''
+ Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
+ Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
+    Returns the original plaintext.
+
+ :param str message:
+ The ciphertext to be decrypted.
+ :param _EncryptionData encryption_data:
+ The metadata associated with this ciphertext.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ unwrap_key(key, algorithm)
+ - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+ get_kid()
+ - returns a string key id for this key-encryption-key.
+ :param function resolver(kid):
+ The user-provided key resolver. Uses the kid string to return a key-encryption-key
+ implementing the interface defined above.
+ :return: The decrypted plaintext.
+ :rtype: str
+ '''
+ _validate_not_none('message', message)
+ content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
+
+ if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm:
+ raise ValueError('Specified encryption algorithm is not supported.')
+
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
+
+ # decrypt data
+ decrypted_data = message
+ decryptor = cipher.decryptor()
+ decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
+
+ # unpad data
+ unpadder = PKCS7(128).unpadder()
+ decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
+
+ return decrypted_data
+
+
+def encrypt_blob(blob, key_encryption_key):
+ '''
+ Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
+ Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+ Returns a json-formatted string containing the encryption metadata. This method should
+ only be used when a blob is small enough for single shot upload. Encrypting larger blobs
+ is done as a part of the upload_data_chunks method.
+
+ :param bytes blob:
+ The blob to be encrypted.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+ get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+ get_kid()--returns a string key id for this key-encryption-key.
+ :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
+ :rtype: (str, bytes)
+ '''
+
+ _validate_not_none('blob', blob)
+ _validate_not_none('key_encryption_key', key_encryption_key)
+ _validate_key_encryption_key_wrap(key_encryption_key)
+
+ # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+ content_encryption_key = urandom(32)
+ initialization_vector = urandom(16)
+
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+ # PKCS7 with 16 byte blocks ensures compatibility with AES.
+ padder = PKCS7(128).padder()
+ padded_data = padder.update(blob) + padder.finalize()
+
+ # Encrypt the data.
+ encryptor = cipher.encryptor()
+ encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+ encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
+ initialization_vector)
+ encryption_data['EncryptionMode'] = 'FullBlob'
+
+ return dumps(encryption_data), encrypted_data
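+
+# Sketch of a key-encryption-key object satisfying the interface above (hypothetical,
+# for illustration only; a real implementation would wrap the CEK with e.g. Azure Key Vault):
+#
+#   class KeyWrapper:
+#       def __init__(self, kid):
+#           self.kid = kid
+#       def wrap_key(self, key):
+#           return wrap_with_your_kek(key)  # placeholder for the caller's wrapping logic
+#       def get_key_wrap_algorithm(self):
+#           return 'A256KW'  # example algorithm identifier
+#       def get_kid(self):
+#           return self.kid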
+
+
+def generate_blob_encryption_data(key_encryption_key):
+ '''
+ Generates the encryption_metadata for the blob.
+
+ :param bytes key_encryption_key:
+ The key-encryption-key used to wrap the cek associate with this blob.
+ :return: A tuple containing the cek and iv for this blob as well as the
+ serialized encryption metadata for the blob.
+ :rtype: (bytes, bytes, str)
+ '''
+ encryption_data = None
+ content_encryption_key = None
+ initialization_vector = None
+ if key_encryption_key:
+ _validate_key_encryption_key_wrap(key_encryption_key)
+ content_encryption_key = urandom(32)
+ initialization_vector = urandom(16)
+ encryption_data = _generate_encryption_data_dict(key_encryption_key,
+ content_encryption_key,
+ initialization_vector)
+ encryption_data['EncryptionMode'] = 'FullBlob'
+ encryption_data = dumps(encryption_data)
+
+ return content_encryption_key, initialization_vector, encryption_data
+
+
+def decrypt_blob(require_encryption, key_encryption_key, key_resolver,
+ content, start_offset, end_offset, response_headers):
+ '''
+ Decrypts the given blob contents and returns only the requested range.
+
+ :param bool require_encryption:
+ Whether or not the calling blob service requires objects to be decrypted.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+ get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+ get_kid()--returns a string key id for this key-encryption-key.
+ :param key_resolver(kid):
+ The user-provided key resolver. Uses the kid string to return a key-encryption-key
+ implementing the interface defined above.
+ :return: The decrypted blob content.
+ :rtype: bytes
+ '''
+ try:
+ encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
+ except: # pylint: disable=bare-except
+ if require_encryption:
+ raise ValueError(
+                'Encryption required, but received data does not contain appropriate metadata. '
+                'Data was either not encrypted or metadata has been lost.')
+
+ return content
+
+ if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
+ raise ValueError('Specified encryption algorithm is not supported.')
+
+ blob_type = response_headers['x-ms-blob-type']
+
+ iv = None
+ unpad = False
+ if 'content-range' in response_headers:
+ content_range = response_headers['content-range']
+ # Format: 'bytes x-y/size'
+
+ # Ignore the word 'bytes'
+ content_range = content_range.split(' ')
+
+ content_range = content_range[1].split('-')
+ content_range = content_range[1].split('/')
+ end_range = int(content_range[0])
+ blob_size = int(content_range[1])
+
+ if start_offset >= 16:
+ iv = content[:16]
+ content = content[16:]
+ start_offset -= 16
+ else:
+ iv = encryption_data.content_encryption_IV
+
+ if end_range == blob_size - 1:
+ unpad = True
+ else:
+ unpad = True
+ iv = encryption_data.content_encryption_IV
+
+ if blob_type == 'PageBlob':
+ unpad = False
+
+ content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
+ decryptor = cipher.decryptor()
+
+ content = decryptor.update(content) + decryptor.finalize()
+ if unpad:
+ unpadder = PKCS7(128).unpadder()
+ content = unpadder.update(content) + unpadder.finalize()
+
+ return content[start_offset: len(content) - end_offset]
+
+
+def get_blob_encryptor_and_padder(cek, iv, should_pad):
+ encryptor = None
+ padder = None
+
+ if cek is not None and iv is not None:
+ cipher = _generate_AES_CBC_cipher(cek, iv)
+ encryptor = cipher.encryptor()
+ padder = PKCS7(128).padder() if should_pad else None
+
+ return encryptor, padder
+
+
+def encrypt_queue_message(message, key_encryption_key):
+ '''
+ Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
+ Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+ Returns a json-formatted string containing the encrypted message and the encryption metadata.
+
+ :param object message:
+        The plain text message to be encrypted.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+ get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+ get_kid()--returns a string key id for this key-encryption-key.
+ :return: A json-formatted string containing the encrypted message and the encryption metadata.
+ :rtype: str
+ '''
+
+ _validate_not_none('message', message)
+ _validate_not_none('key_encryption_key', key_encryption_key)
+ _validate_key_encryption_key_wrap(key_encryption_key)
+
+ # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+ content_encryption_key = os.urandom(32)
+ initialization_vector = os.urandom(16)
+
+ # Queue encoding functions all return unicode strings, and encryption should
+ # operate on binary strings.
+ message = message.encode('utf-8')
+
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+ # PKCS7 with 16 byte blocks ensures compatibility with AES.
+ padder = PKCS7(128).padder()
+ padded_data = padder.update(message) + padder.finalize()
+
+ # Encrypt the data.
+ encryptor = cipher.encryptor()
+ encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+
+ # Build the dictionary structure.
+ queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
+ 'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
+ content_encryption_key,
+ initialization_vector)}
+
+ return dumps(queue_message)
+
+
+def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
+ '''
+ Returns the decrypted message contents from an EncryptedQueueMessage.
+ If no encryption metadata is present, will return the unaltered message.
+ :param str message:
+ The JSON formatted QueueEncryptedMessage contents with all associated metadata.
+ :param bool require_encryption:
+ If set, will enforce that the retrieved messages are encrypted and decrypt them.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ unwrap_key(key, algorithm)
+        - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+ get_kid()
+ - returns a string key id for this key-encryption-key.
+ :param function resolver(kid):
+ The user-provided key resolver. Uses the kid string to return a key-encryption-key
+ implementing the interface defined above.
+ :return: The plain text message from the queue message.
+ :rtype: str
+ '''
+
+ try:
+ message = loads(message)
+
+ encryption_data = _dict_to_encryption_data(message['EncryptionData'])
+ decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
+ except (KeyError, ValueError):
+ # Message was not json formatted and so was not encrypted
+ # or the user provided a json formatted message.
+ if require_encryption:
+ raise ValueError('Message was not encrypted.')
+
+ return message
+ try:
+ return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
+ except Exception as error:
+ raise HttpResponseError(
+ message="Decryption failed.",
+ response=response,
+ error=error)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/models.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/models.py
new file mode 100644
index 00000000000..c51356bd885
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/models.py
@@ -0,0 +1,466 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-instance-attributes
+from enum import Enum
+
+
+def get_enum_value(value):
+ if value is None or value in ["None", ""]:
+ return None
+ try:
+ return value.value
+ except AttributeError:
+ return value
+
+
+class StorageErrorCode(str, Enum):
+
+ # Generic storage values
+ account_already_exists = "AccountAlreadyExists"
+ account_being_created = "AccountBeingCreated"
+ account_is_disabled = "AccountIsDisabled"
+ authentication_failed = "AuthenticationFailed"
+ authorization_failure = "AuthorizationFailure"
+ no_authentication_information = "NoAuthenticationInformation"
+ condition_headers_not_supported = "ConditionHeadersNotSupported"
+ condition_not_met = "ConditionNotMet"
+ empty_metadata_key = "EmptyMetadataKey"
+ insufficient_account_permissions = "InsufficientAccountPermissions"
+ internal_error = "InternalError"
+ invalid_authentication_info = "InvalidAuthenticationInfo"
+ invalid_header_value = "InvalidHeaderValue"
+ invalid_http_verb = "InvalidHttpVerb"
+ invalid_input = "InvalidInput"
+ invalid_md5 = "InvalidMd5"
+ invalid_metadata = "InvalidMetadata"
+ invalid_query_parameter_value = "InvalidQueryParameterValue"
+ invalid_range = "InvalidRange"
+ invalid_resource_name = "InvalidResourceName"
+ invalid_uri = "InvalidUri"
+ invalid_xml_document = "InvalidXmlDocument"
+ invalid_xml_node_value = "InvalidXmlNodeValue"
+ md5_mismatch = "Md5Mismatch"
+ metadata_too_large = "MetadataTooLarge"
+ missing_content_length_header = "MissingContentLengthHeader"
+ missing_required_query_parameter = "MissingRequiredQueryParameter"
+ missing_required_header = "MissingRequiredHeader"
+ missing_required_xml_node = "MissingRequiredXmlNode"
+ multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported"
+ operation_timed_out = "OperationTimedOut"
+ out_of_range_input = "OutOfRangeInput"
+ out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue"
+ request_body_too_large = "RequestBodyTooLarge"
+ resource_type_mismatch = "ResourceTypeMismatch"
+ request_url_failed_to_parse = "RequestUrlFailedToParse"
+ resource_already_exists = "ResourceAlreadyExists"
+ resource_not_found = "ResourceNotFound"
+ server_busy = "ServerBusy"
+ unsupported_header = "UnsupportedHeader"
+ unsupported_xml_node = "UnsupportedXmlNode"
+ unsupported_query_parameter = "UnsupportedQueryParameter"
+ unsupported_http_verb = "UnsupportedHttpVerb"
+
+ # Blob values
+ append_position_condition_not_met = "AppendPositionConditionNotMet"
+ blob_already_exists = "BlobAlreadyExists"
+ blob_not_found = "BlobNotFound"
+ blob_overwritten = "BlobOverwritten"
+ blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength"
+ block_count_exceeds_limit = "BlockCountExceedsLimit"
+ block_list_too_long = "BlockListTooLong"
+ cannot_change_to_lower_tier = "CannotChangeToLowerTier"
+ cannot_verify_copy_source = "CannotVerifyCopySource"
+ container_already_exists = "ContainerAlreadyExists"
+ container_being_deleted = "ContainerBeingDeleted"
+ container_disabled = "ContainerDisabled"
+ container_not_found = "ContainerNotFound"
+ content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit"
+ copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported"
+ copy_id_mismatch = "CopyIdMismatch"
+ feature_version_mismatch = "FeatureVersionMismatch"
+ incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch"
+ incremental_copy_of_eralier_version_snapshot_not_allowed = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
+ incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot"
+ infinite_lease_duration_required = "InfiniteLeaseDurationRequired"
+ invalid_blob_or_block = "InvalidBlobOrBlock"
+ invalid_blob_tier = "InvalidBlobTier"
+ invalid_blob_type = "InvalidBlobType"
+ invalid_block_id = "InvalidBlockId"
+ invalid_block_list = "InvalidBlockList"
+ invalid_operation = "InvalidOperation"
+ invalid_page_range = "InvalidPageRange"
+ invalid_source_blob_type = "InvalidSourceBlobType"
+ invalid_source_blob_url = "InvalidSourceBlobUrl"
+ invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation"
+ lease_already_present = "LeaseAlreadyPresent"
+ lease_already_broken = "LeaseAlreadyBroken"
+ lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation"
+ lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation"
+ lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation"
+ lease_id_missing = "LeaseIdMissing"
+ lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired"
+ lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged"
+ lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed"
+ lease_lost = "LeaseLost"
+ lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation"
+ lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation"
+ lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation"
+ max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet"
+ no_pending_copy_operation = "NoPendingCopyOperation"
+ operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob"
+ pending_copy_operation = "PendingCopyOperation"
+ previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer"
+ previous_snapshot_not_found = "PreviousSnapshotNotFound"
+ previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported"
+ sequence_number_condition_not_met = "SequenceNumberConditionNotMet"
+ sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge"
+ snapshot_count_exceeded = "SnapshotCountExceeded"
+ snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded"
+ snapshots_present = "SnapshotsPresent"
+ source_condition_not_met = "SourceConditionNotMet"
+ system_in_use = "SystemInUse"
+ target_condition_not_met = "TargetConditionNotMet"
+ unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite"
+ blob_being_rehydrated = "BlobBeingRehydrated"
+ blob_archived = "BlobArchived"
+ blob_not_archived = "BlobNotArchived"
+
+ # Queue values
+ invalid_marker = "InvalidMarker"
+ message_not_found = "MessageNotFound"
+ message_too_large = "MessageTooLarge"
+ pop_receipt_mismatch = "PopReceiptMismatch"
+ queue_already_exists = "QueueAlreadyExists"
+ queue_being_deleted = "QueueBeingDeleted"
+ queue_disabled = "QueueDisabled"
+ queue_not_empty = "QueueNotEmpty"
+ queue_not_found = "QueueNotFound"
+
+ # File values
+ cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory"
+ client_cache_flush_delay = "ClientCacheFlushDelay"
+ delete_pending = "DeletePending"
+ directory_not_empty = "DirectoryNotEmpty"
+ file_lock_conflict = "FileLockConflict"
+ invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName"
+ parent_not_found = "ParentNotFound"
+ read_only_attribute = "ReadOnlyAttribute"
+ share_already_exists = "ShareAlreadyExists"
+ share_being_deleted = "ShareBeingDeleted"
+ share_disabled = "ShareDisabled"
+ share_not_found = "ShareNotFound"
+ sharing_violation = "SharingViolation"
+ share_snapshot_in_progress = "ShareSnapshotInProgress"
+ share_snapshot_count_exceeded = "ShareSnapshotCountExceeded"
+ share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported"
+ share_has_snapshots = "ShareHasSnapshots"
+ container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed"
+
+ # DataLake values
+ content_length_must_be_zero = 'ContentLengthMustBeZero'
+ path_already_exists = 'PathAlreadyExists'
+ invalid_flush_position = 'InvalidFlushPosition'
+ invalid_property_name = 'InvalidPropertyName'
+ invalid_source_uri = 'InvalidSourceUri'
+ unsupported_rest_version = 'UnsupportedRestVersion'
+ file_system_not_found = 'FilesystemNotFound'
+ path_not_found = 'PathNotFound'
+ rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound'
+ source_path_not_found = 'SourcePathNotFound'
+ destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted'
+ file_system_already_exists = 'FilesystemAlreadyExists'
+ file_system_being_deleted = 'FilesystemBeingDeleted'
+ invalid_destination_path = 'InvalidDestinationPath'
+ invalid_rename_source_path = 'InvalidRenameSourcePath'
+ invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType'
+ lease_is_already_broken = 'LeaseIsAlreadyBroken'
+ lease_name_mismatch = 'LeaseNameMismatch'
+ path_conflict = 'PathConflict'
+ source_path_is_being_deleted = 'SourcePathIsBeingDeleted'
+
+
+class DictMixin(object):
+
+ def __setitem__(self, key, item):
+ self.__dict__[key] = item
+
+ def __getitem__(self, key):
+ return self.__dict__[key]
+
+ def __repr__(self):
+ return str(self)
+
+ def __len__(self):
+ return len(self.keys())
+
+ def __delitem__(self, key):
+ self.__dict__[key] = None
+
+ def __eq__(self, other):
+ """Compare objects by comparing all attributes."""
+ if isinstance(other, self.__class__):
+ return self.__dict__ == other.__dict__
+ return False
+
+ def __ne__(self, other):
+ """Compare objects by comparing all attributes."""
+ return not self.__eq__(other)
+
+ def __str__(self):
+ return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')})
+
+ def has_key(self, k):
+ return k in self.__dict__
+
+ def update(self, *args, **kwargs):
+ return self.__dict__.update(*args, **kwargs)
+
+ def keys(self):
+ return [k for k in self.__dict__ if not k.startswith('_')]
+
+ def values(self):
+ return [v for k, v in self.__dict__.items() if not k.startswith('_')]
+
+ def items(self):
+ return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')]
+
+ def get(self, key, default=None):
+ if key in self.__dict__:
+ return self.__dict__[key]
+ return default
+
+
+class LocationMode(object):
+ """
+ Specifies the location the request should be sent to. This mode only applies
+ for RA-GRS accounts which allow secondary read access. All other account types
+ must use PRIMARY.
+ """
+
+ PRIMARY = 'primary' #: Requests should be sent to the primary location.
+ SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible.
+
+
+class ResourceTypes(object):
+ """
+ Specifies the resource types that are accessible with the account SAS.
+
+ :param bool service:
+ Access to service-level APIs (e.g., Get/Set Service Properties,
+ Get Service Stats, List Containers/Queues/Shares)
+ :param bool container:
+ Access to container-level APIs (e.g., Create/Delete Container,
+ Create/Delete Queue, Create/Delete Share,
+ List Blobs/Files and Directories)
+ :param bool object:
+ Access to object-level APIs for blobs, queue messages, and
+        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
+ """
+
+ def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin
+ self.service = service
+ self.container = container
+ self.object = object
+ self._str = (('s' if self.service else '') +
+ ('c' if self.container else '') +
+ ('o' if self.object else ''))
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, string):
+ """Create a ResourceTypes from a string.
+
+ To specify service, container, or object you need only to
+        include the first letter of the word in the string. E.g. for service and container,
+ you would provide a string "sc".
+
+        :param str string: Specify service, container, or object
+            in the string with the first letter of the word.
+ :return: A ResourceTypes object
+ :rtype: ~azure.storage.blob.ResourceTypes
+ """
+ res_service = 's' in string
+ res_container = 'c' in string
+ res_object = 'o' in string
+
+ parsed = cls(res_service, res_container, res_object)
+ parsed._str = string # pylint: disable = protected-access
+ return parsed
+
+
+class AccountSasPermissions(object):
+ """
+    :class:`~AccountSasPermissions` class to be used with the generate_account_sas
+ function and for the AccessPolicies used with set_*_acl. There are two types of
+ SAS which may be used to grant resource access. One is to grant access to a
+ specific resource (resource-specific). Another is to grant access to the
+ entire service for a specific account and allow certain operations based on
+ perms found here.
+
+ :param bool read:
+ Valid for all signed resources types (Service, Container, and Object).
+ Permits read permissions to the specified resource type.
+ :param bool write:
+ Valid for all signed resources types (Service, Container, and Object).
+ Permits write permissions to the specified resource type.
+ :param bool delete:
+ Valid for Container and Object resource types, except for queue messages.
+ :param bool delete_previous_version:
+ Delete the previous blob version for the versioning enabled storage account.
+ :param bool list:
+ Valid for Service and Container resource types only.
+ :param bool add:
+ Valid for the following Object resource types only: queue messages, and append blobs.
+ :param bool create:
+ Valid for the following Object resource types only: blobs and files.
+ Users can create new blobs or files, but may not overwrite existing
+ blobs or files.
+ :param bool update:
+ Valid for the following Object resource types only: queue messages.
+ :param bool process:
+ Valid for the following Object resource type only: queue messages.
+ :keyword bool tag:
+ To enable set or get tags on the blobs in the container.
+ :keyword bool filter_by_tags:
+ To enable get blobs by tags, this should be used together with list permission.
+ """
+ def __init__(self, read=False, write=False, delete=False,
+ list=False, # pylint: disable=redefined-builtin
+ add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs):
+ self.read = read
+ self.write = write
+ self.delete = delete
+ self.delete_previous_version = delete_previous_version
+ self.list = list
+ self.add = add
+ self.create = create
+ self.update = update
+ self.process = process
+ self.tag = kwargs.pop('tag', False)
+ self.filter_by_tags = kwargs.pop('filter_by_tags', False)
+ self._str = (('r' if self.read else '') +
+ ('w' if self.write else '') +
+ ('d' if self.delete else '') +
+ ('x' if self.delete_previous_version else '') +
+ ('l' if self.list else '') +
+ ('a' if self.add else '') +
+ ('c' if self.create else '') +
+ ('u' if self.update else '') +
+ ('p' if self.process else '') +
+ ('f' if self.filter_by_tags else '') +
+ ('t' if self.tag else '')
+ )
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, permission):
+ """Create AccountSasPermissions from a string.
+
+ To specify read, write, delete, etc. permissions you need only to
+ include the first letter of the word in the string. E.g. for read and write
+ permissions you would provide a string "rw".
+
+ :param str permission: Specify permissions in
+ the string with the first letter of the word.
+ :return: An AccountSasPermissions object
+ :rtype: ~azure.storage.blob.AccountSasPermissions
+ """
+ p_read = 'r' in permission
+ p_write = 'w' in permission
+ p_delete = 'd' in permission
+ p_delete_previous_version = 'x' in permission
+ p_list = 'l' in permission
+ p_add = 'a' in permission
+ p_create = 'c' in permission
+ p_update = 'u' in permission
+ p_process = 'p' in permission
+ p_tag = 't' in permission
+ p_filter_by_tags = 'f' in permission
+ parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version,
+ list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag,
+ filter_by_tags=p_filter_by_tags)
+
+ return parsed
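+
+# Example (illustrative sketch): the permission string uses one letter per
+# permission, so "rwdl" grants read, write, delete and list.
+#
+#   perms = AccountSasPermissions.from_string("rwdl")
+#   assert perms.read and perms.write and perms.delete and perms.list
+#   assert str(perms) == "rwdl"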
+
+
+class Services(object):
+ """Specifies the services accessible with the account SAS.
+
+ :param bool blob:
+ Access for the `~azure.storage.blob.BlobServiceClient`
+ :param bool queue:
+ Access for the `~azure.storage.queue.QueueServiceClient`
+ :param bool fileshare:
+ Access for the `~azure.storage.fileshare.ShareServiceClient`
+ """
+
+ def __init__(self, blob=False, queue=False, fileshare=False):
+ self.blob = blob
+ self.queue = queue
+ self.fileshare = fileshare
+ self._str = (('b' if self.blob else '') +
+ ('q' if self.queue else '') +
+ ('f' if self.fileshare else ''))
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, string):
+ """Create Services from a string.
+
+ To specify blob, queue, or file you need only to
+ include the first letter of the word in the string. E.g. for blob and queue
+ you would provide a string "bq".
+
+ :param str string: Specify blob, queue, or file in
+            the string with the first letter of the word.
+ :return: A Services object
+ :rtype: ~azure.storage.blob.Services
+ """
+ res_blob = 'b' in string
+ res_queue = 'q' in string
+ res_file = 'f' in string
+
+ parsed = cls(res_blob, res_queue, res_file)
+ parsed._str = string # pylint: disable = protected-access
+ return parsed
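+
+# Example (illustrative sketch): "bq" describes an account SAS scoped to the
+# blob and queue endpoints only.
+#
+#   services = Services.from_string("bq")
+#   assert services.blob and services.queue and not services.fileshare
+#   assert str(services) == "bq"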
+
+
+class UserDelegationKey(object):
+ """
+ Represents a user delegation key, provided to the user by Azure Storage
+ based on their Azure Active Directory access token.
+
+ The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+
+ :ivar str signed_oid:
+ Object ID of this token.
+ :ivar str signed_tid:
+ Tenant ID of the tenant that issued this token.
+ :ivar str signed_start:
+ The datetime this token becomes valid.
+ :ivar str signed_expiry:
+ The datetime this token expires.
+ :ivar str signed_service:
+ What service this key is valid for.
+ :ivar str signed_version:
+ The version identifier of the REST service that created this token.
+ :ivar str value:
+ The user delegation key.
+ """
+ def __init__(self):
+ self.signed_oid = None
+ self.signed_tid = None
+ self.signed_start = None
+ self.signed_expiry = None
+ self.signed_service = None
+ self.signed_version = None
+ self.value = None
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/parser.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/parser.py
new file mode 100644
index 00000000000..c6feba8a639
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/parser.py
@@ -0,0 +1,20 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import sys
+
+if sys.version_info < (3,):
+ def _str(value):
+ if isinstance(value, unicode): # pylint: disable=undefined-variable
+ return value.encode('utf-8')
+
+ return str(value)
+else:
+ _str = str
+
+
+def _to_utc_datetime(value):
+ return value.strftime('%Y-%m-%dT%H:%M:%SZ')
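+
+# Example (illustrative sketch):
+#   _to_utc_datetime(datetime.datetime(2021, 5, 20, 8, 30, 0))
+#   -> '2021-05-20T08:30:00Z'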
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/policies.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/policies.py
new file mode 100644
index 00000000000..c9bc798d671
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/policies.py
@@ -0,0 +1,610 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import re
+import random
+from time import time
+from io import SEEK_SET, UnsupportedOperation
+import logging
+import uuid
+import types
+from typing import Any, TYPE_CHECKING
+from wsgiref.handlers import format_date_time
+try:
+ from urllib.parse import (
+ urlparse,
+ parse_qsl,
+ urlunparse,
+ urlencode,
+ )
+except ImportError:
+ from urllib import urlencode # type: ignore
+ from urlparse import ( # type: ignore
+ urlparse,
+ parse_qsl,
+ urlunparse,
+ )
+
+from azure.core.pipeline.policies import (
+ HeadersPolicy,
+ SansIOHTTPPolicy,
+ NetworkTraceLoggingPolicy,
+ HTTPPolicy,
+ RequestHistory
+)
+from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
+
+from .models import LocationMode
+
+try:
+ _unicode_type = unicode # type: ignore
+except NameError:
+ _unicode_type = str
+
+if TYPE_CHECKING:
+ from azure.core.pipeline import PipelineRequest, PipelineResponse
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def encode_base64(data):
+ if isinstance(data, _unicode_type):
+ data = data.encode('utf-8')
+ encoded = base64.b64encode(data)
+ return encoded.decode('utf-8')
+
+
+def is_exhausted(settings):
+ """Are we out of retries?"""
+ retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
+ retry_counts = list(filter(None, retry_counts))
+ if not retry_counts:
+ return False
+ return min(retry_counts) < 0
+
+
+def retry_hook(settings, **kwargs):
+ if settings['hook']:
+ settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
+
+
+def is_retry(response, mode):
+ """Is this method/status code retryable? (Based on whitelists and control
+ variables such as the number of total retries to allow, whether to
+ respect the Retry-After header, whether this header is present, and
+ whether the returned status code is on the list of status codes to
+    be retried upon the presence of the aforementioned header)
+ """
+ status = response.http_response.status_code
+ if 300 <= status < 500:
+        # An exception occurred, but in most cases it was expected. Examples could
+        # include a 409 Conflict or 412 Precondition Failed.
+ if status == 404 and mode == LocationMode.SECONDARY:
+ # Response code 404 should be retried if secondary was used.
+ return True
+ if status == 408:
+ # Response code 408 is a timeout and should be retried.
+ return True
+ return False
+ if status >= 500:
+ # Response codes above 500 with the exception of 501 Not Implemented and
+ # 505 Version Not Supported indicate a server issue and should be retried.
+ if status in [501, 505]:
+ return False
+ return True
+ return False
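+
+# Illustrative outcomes of the rules above (hypothetical responses):
+#   408 Request Timeout          -> retried
+#   404 against the SECONDARY    -> retried (data may not have replicated yet)
+#   501 / 505                    -> not retried
+#   503 Service Unavailable      -> retried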
+
+
+def urljoin(base_url, stub_url):
+ parsed = urlparse(base_url)
+ parsed = parsed._replace(path=parsed.path + '/' + stub_url)
+ return parsed.geturl()
+
+
+class QueueMessagePolicy(SansIOHTTPPolicy):
+
+ def on_request(self, request):
+ message_id = request.context.options.pop('queue_message_id', None)
+ if message_id:
+ request.http_request.url = urljoin(
+ request.http_request.url,
+ message_id)
+
+
+class StorageHeadersPolicy(HeadersPolicy):
+ request_id_header_name = 'x-ms-client-request-id'
+
+ def on_request(self, request):
+ # type: (PipelineRequest, Any) -> None
+ super(StorageHeadersPolicy, self).on_request(request)
+ current_time = format_date_time(time())
+ request.http_request.headers['x-ms-date'] = current_time
+
+ custom_id = request.context.options.pop('client_request_id', None)
+ request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1())
+
+ # def on_response(self, request, response):
+ # # raise exception if the echoed client request id from the service is not identical to the one we sent
+ # if self.request_id_header_name in response.http_response.headers:
+
+ # client_request_id = request.http_request.headers.get(self.request_id_header_name)
+
+ # if response.http_response.headers[self.request_id_header_name] != client_request_id:
+ # raise AzureError(
+ # "Echoed client request ID: {} does not match sent client request ID: {}. "
+ # "Service request ID: {}".format(
+ # response.http_response.headers[self.request_id_header_name], client_request_id,
+ # response.http_response.headers['x-ms-request-id']),
+ # response=response.http_response
+ # )
+
+
+class StorageHosts(SansIOHTTPPolicy):
+
+ def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument
+ self.hosts = hosts
+ super(StorageHosts, self).__init__()
+
+ def on_request(self, request):
+ # type: (PipelineRequest, Any) -> None
+ request.context.options['hosts'] = self.hosts
+ parsed_url = urlparse(request.http_request.url)
+
+ # Detect what location mode we're currently requesting with
+ location_mode = LocationMode.PRIMARY
+ for key, value in self.hosts.items():
+ if parsed_url.netloc == value:
+ location_mode = key
+
+ # See if a specific location mode has been specified, and if so, redirect
+ use_location = request.context.options.pop('use_location', None)
+ if use_location:
+ # Lock retries to the specific location
+ request.context.options['retry_to_secondary'] = False
+ if use_location not in self.hosts:
+ raise ValueError("Attempting to use undefined host location {}".format(use_location))
+ if use_location != location_mode:
+ # Update request URL to use the specified location
+ updated = parsed_url._replace(netloc=self.hosts[use_location])
+ request.http_request.url = updated.geturl()
+ location_mode = use_location
+
+ request.context.options['location_mode'] = location_mode
+
+
+class StorageLoggingPolicy(NetworkTraceLoggingPolicy):
+ """A policy that logs HTTP request and response to the DEBUG logger.
+
+    This accepts both global configuration and per-request overrides via the "logging_enable" keyword.
+ """
+
+ def on_request(self, request):
+ # type: (PipelineRequest, Any) -> None
+ http_request = request.http_request
+ options = request.context.options
+ if options.pop("logging_enable", self.enable_http_logger):
+ request.context["logging_enable"] = True
+ if not _LOGGER.isEnabledFor(logging.DEBUG):
+ return
+
+ try:
+ log_url = http_request.url
+ query_params = http_request.query
+ if 'sig' in query_params:
+ log_url = log_url.replace(query_params['sig'], "sig=*****")
+ _LOGGER.debug("Request URL: %r", log_url)
+ _LOGGER.debug("Request method: %r", http_request.method)
+ _LOGGER.debug("Request headers:")
+ for header, value in http_request.headers.items():
+ if header.lower() == 'authorization':
+ value = '*****'
+ elif header.lower() == 'x-ms-copy-source' and 'sig' in value:
+ # take the url apart and scrub away the signed signature
+ scheme, netloc, path, params, query, fragment = urlparse(value)
+ parsed_qs = dict(parse_qsl(query))
+ parsed_qs['sig'] = '*****'
+
+ # the SAS needs to be put back together
+ value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))
+
+ _LOGGER.debug(" %r: %r", header, value)
+ _LOGGER.debug("Request body:")
+
+ # We don't want to log the binary data of a file upload.
+ if isinstance(http_request.body, types.GeneratorType):
+ _LOGGER.debug("File upload")
+ else:
+ _LOGGER.debug(str(http_request.body))
+ except Exception as err: # pylint: disable=broad-except
+ _LOGGER.debug("Failed to log request: %r", err)
+
+ def on_response(self, request, response):
+ # type: (PipelineRequest, PipelineResponse, Any) -> None
+ if response.context.pop("logging_enable", self.enable_http_logger):
+ if not _LOGGER.isEnabledFor(logging.DEBUG):
+ return
+
+ try:
+ _LOGGER.debug("Response status: %r", response.http_response.status_code)
+ _LOGGER.debug("Response headers:")
+ for res_header, value in response.http_response.headers.items():
+ _LOGGER.debug(" %r: %r", res_header, value)
+
+ # We don't want to log binary data if the response is a file.
+ _LOGGER.debug("Response content:")
+ pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE)
+ header = response.http_response.headers.get('content-disposition')
+
+ if header and pattern.match(header):
+ filename = header.partition('=')[2]
+ _LOGGER.debug("File attachments: %s", filename)
+ elif response.http_response.headers.get("content-type", "").endswith("octet-stream"):
+ _LOGGER.debug("Body contains binary data.")
+ elif response.http_response.headers.get("content-type", "").startswith("image"):
+ _LOGGER.debug("Body contains image data.")
+ else:
+ if response.context.options.get('stream', False):
+ _LOGGER.debug("Body is streamable")
+ else:
+ _LOGGER.debug(response.http_response.text())
+ except Exception as err: # pylint: disable=broad-except
+ _LOGGER.debug("Failed to log response: %s", repr(err))
+
+
+class StorageRequestHook(SansIOHTTPPolicy):
+
+ def __init__(self, **kwargs): # pylint: disable=unused-argument
+ self._request_callback = kwargs.get('raw_request_hook')
+ super(StorageRequestHook, self).__init__()
+
+ def on_request(self, request):
+ # type: (PipelineRequest, **Any) -> PipelineResponse
+ request_callback = request.context.options.pop('raw_request_hook', self._request_callback)
+ if request_callback:
+ request_callback(request)
+
+
+class StorageResponseHook(HTTPPolicy):
+
+ def __init__(self, **kwargs): # pylint: disable=unused-argument
+ self._response_callback = kwargs.get('raw_response_hook')
+ super(StorageResponseHook, self).__init__()
+
+ def send(self, request):
+ # type: (PipelineRequest) -> PipelineResponse
+ data_stream_total = request.context.get('data_stream_total') or \
+ request.context.options.pop('data_stream_total', None)
+ download_stream_current = request.context.get('download_stream_current') or \
+ request.context.options.pop('download_stream_current', None)
+ upload_stream_current = request.context.get('upload_stream_current') or \
+ request.context.options.pop('upload_stream_current', None)
+ response_callback = request.context.get('response_callback') or \
+ request.context.options.pop('raw_response_hook', self._response_callback)
+
+ response = self.next.send(request)
+ will_retry = is_retry(response, request.context.options.get('mode'))
+ if not will_retry and download_stream_current is not None:
+ download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+ if data_stream_total is None:
+ content_range = response.http_response.headers.get('Content-Range')
+ if content_range:
+ data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+ else:
+ data_stream_total = download_stream_current
+ elif not will_retry and upload_stream_current is not None:
+ upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+ for pipeline_obj in [request, response]:
+ pipeline_obj.context['data_stream_total'] = data_stream_total
+ pipeline_obj.context['download_stream_current'] = download_stream_current
+ pipeline_obj.context['upload_stream_current'] = upload_stream_current
+ if response_callback:
+ response_callback(response)
+ request.context['response_callback'] = response_callback
+ return response
+
+
+class StorageContentValidation(SansIOHTTPPolicy):
+ """A simple policy that sends the given headers
+ with the request.
+
+ This will overwrite any headers already defined in the request.
+ """
+ header_name = 'Content-MD5'
+
+ def __init__(self, **kwargs): # pylint: disable=unused-argument
+ super(StorageContentValidation, self).__init__()
+
+ @staticmethod
+ def get_content_md5(data):
+ md5 = hashlib.md5() # nosec
+ if isinstance(data, bytes):
+ md5.update(data)
+ elif hasattr(data, 'read'):
+ pos = 0
+ try:
+ pos = data.tell()
+ except: # pylint: disable=bare-except
+ pass
+ for chunk in iter(lambda: data.read(4096), b""):
+ md5.update(chunk)
+ try:
+ data.seek(pos, SEEK_SET)
+ except (AttributeError, IOError):
+ raise ValueError("Data should be bytes or a seekable file-like object.")
+ else:
+ raise ValueError("Data should be bytes or a seekable file-like object.")
+
+ return md5.digest()
+
+ def on_request(self, request):
+ # type: (PipelineRequest, Any) -> None
+ validate_content = request.context.options.pop('validate_content', False)
+ if validate_content and request.http_request.method != 'GET':
+ computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data))
+ request.http_request.headers[self.header_name] = computed_md5
+ request.context['validate_content_md5'] = computed_md5
+ request.context['validate_content'] = validate_content
+
+ def on_response(self, request, response):
+ if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
+ computed_md5 = request.context.get('validate_content_md5') or \
+ encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
+ if response.http_response.headers['content-md5'] != computed_md5:
+ raise AzureError(
+ 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format(
+ response.http_response.headers['content-md5'], computed_md5),
+ response=response.http_response
+ )
+
+
+class StorageRetryPolicy(HTTPPolicy):
+ """
+ The base class for Exponential and Linear retries containing shared code.
+ """
+
+ def __init__(self, **kwargs):
+ self.total_retries = kwargs.pop('retry_total', 10)
+ self.connect_retries = kwargs.pop('retry_connect', 3)
+ self.read_retries = kwargs.pop('retry_read', 3)
+ self.status_retries = kwargs.pop('retry_status', 3)
+ self.retry_to_secondary = kwargs.pop('retry_to_secondary', False)
+ super(StorageRetryPolicy, self).__init__()
+
+ def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use
+ """
+ A function which sets the next host location on the request, if applicable.
+
+        :param dict settings:
+            The retry settings, containing the current location mode and the
+            configured primary/secondary hosts.
+        :param request: The request to evaluate and possibly modify.
+ """
+ if settings['hosts'] and all(settings['hosts'].values()):
+ url = urlparse(request.url)
+ # If there's more than one possible location, retry to the alternative
+ if settings['mode'] == LocationMode.PRIMARY:
+ settings['mode'] = LocationMode.SECONDARY
+ else:
+ settings['mode'] = LocationMode.PRIMARY
+ updated = url._replace(netloc=settings['hosts'].get(settings['mode']))
+ request.url = updated.geturl()
+
+ def configure_retries(self, request): # pylint: disable=no-self-use
+ body_position = None
+ if hasattr(request.http_request.body, 'read'):
+ try:
+ body_position = request.http_request.body.tell()
+ except (AttributeError, UnsupportedOperation):
+ # if body position cannot be obtained, then retries will not work
+ pass
+ options = request.context.options
+ return {
+ 'total': options.pop("retry_total", self.total_retries),
+ 'connect': options.pop("retry_connect", self.connect_retries),
+ 'read': options.pop("retry_read", self.read_retries),
+ 'status': options.pop("retry_status", self.status_retries),
+ 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary),
+ 'mode': options.pop("location_mode", LocationMode.PRIMARY),
+ 'hosts': options.pop("hosts", None),
+ 'hook': options.pop("retry_hook", None),
+ 'body_position': body_position,
+ 'count': 0,
+ 'history': []
+ }
+
+ def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use
+ """ Formula for computing the current backoff.
+ Should be calculated by child class.
+
+ :rtype: float
+ """
+ return 0
+
+ def sleep(self, settings, transport):
+ backoff = self.get_backoff_time(settings)
+ if not backoff or backoff < 0:
+ return
+ transport.sleep(backoff)
+
+ def increment(self, settings, request, response=None, error=None):
+ """Increment the retry counters.
+
+ :param response: A pipeline response object.
+ :param error: An error encountered during the request, or
+ None if the response was received successfully.
+
+ :return: Whether the retry attempts are exhausted.
+ """
+ settings['total'] -= 1
+
+ if error and isinstance(error, ServiceRequestError):
+ # Errors when we're fairly sure that the server did not receive the
+ # request, so it should be safe to retry.
+ settings['connect'] -= 1
+ settings['history'].append(RequestHistory(request, error=error))
+
+ elif error and isinstance(error, ServiceResponseError):
+ # Errors that occur after the request has been started, so we should
+ # assume that the server began processing it.
+ settings['read'] -= 1
+ settings['history'].append(RequestHistory(request, error=error))
+
+ else:
+            # Incrementing because of a server error like a 500 in
+            # status_forcelist and the given method is in the whitelist
+ if response:
+ settings['status'] -= 1
+ settings['history'].append(RequestHistory(request, http_response=response))
+
+ if not is_exhausted(settings):
+ if request.method not in ['PUT'] and settings['retry_secondary']:
+ self._set_next_host_location(settings, request)
+
+ # rewind the request body if it is a stream
+ if request.body and hasattr(request.body, 'read'):
+                # if no position was saved, retries will not work
+ if settings['body_position'] is None:
+ return False
+ try:
+ # attempt to rewind the body to the initial position
+ request.body.seek(settings['body_position'], SEEK_SET)
+ except (UnsupportedOperation, ValueError):
+ # if body is not seekable, then retry would not work
+ return False
+ settings['count'] += 1
+ return True
+ return False
+
+ def send(self, request):
+ retries_remaining = True
+ response = None
+ retry_settings = self.configure_retries(request)
+ while retries_remaining:
+ try:
+ response = self.next.send(request)
+ if is_retry(response, retry_settings['mode']):
+ retries_remaining = self.increment(
+ retry_settings,
+ request=request.http_request,
+ response=response.http_response)
+ if retries_remaining:
+ retry_hook(
+ retry_settings,
+ request=request.http_request,
+ response=response.http_response,
+ error=None)
+ self.sleep(retry_settings, request.context.transport)
+ continue
+ break
+ except AzureError as err:
+ retries_remaining = self.increment(
+ retry_settings, request=request.http_request, error=err)
+ if retries_remaining:
+ retry_hook(
+ retry_settings,
+ request=request.http_request,
+ response=None,
+ error=err)
+ self.sleep(retry_settings, request.context.transport)
+ continue
+ raise err
+ if retry_settings['history']:
+ response.context['history'] = retry_settings['history']
+ response.http_response.location_mode = retry_settings['mode']
+ return response
+
+
+class ExponentialRetry(StorageRetryPolicy):
+ """Exponential retry."""
+
+ def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+ retry_to_secondary=False, random_jitter_range=3, **kwargs):
+ '''
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds. For example, by default the first retry
+        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+        third after (15+3^2) = 24 seconds.
+
+ :param int initial_backoff:
+ The initial backoff interval, in seconds, for the first retry.
+ :param int increment_base:
+ The base, in seconds, to increment the initial_backoff by after the
+ first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+ '''
+ self.initial_backoff = initial_backoff
+ self.increment_base = increment_base
+ self.random_jitter_range = random_jitter_range
+ super(ExponentialRetry, self).__init__(
+ retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+ def get_backoff_time(self, settings):
+ """
+ Calculates how long to sleep before retrying.
+
+ :return:
+ An integer indicating how long to wait before retrying the request,
+ or None to indicate no retry should be performed.
+ :rtype: int or None
+ """
+ random_generator = random.Random()
+ backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+ random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+ random_range_end = backoff + self.random_jitter_range
+ return random_generator.uniform(random_range_start, random_range_end)
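+
+# Example (illustrative sketch): with the defaults (initial_backoff=15,
+# increment_base=3, random_jitter_range=3) the pre-jitter back-off values are
+# 15, 15 + 3**1 = 18 and 15 + 3**2 = 24 seconds, each then jittered by up to
+# +/- 3 seconds. get_backoff_time only reads settings['count'] here:
+#
+#   retry = ExponentialRetry()
+#   for count in range(3):
+#       print(retry.get_backoff_time({'count': count}))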
+
+
+class LinearRetry(StorageRetryPolicy):
+ """Linear retry."""
+
+ def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+ """
+ Constructs a Linear retry object.
+
+ :param int backoff:
+ The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+ """
+ self.backoff = backoff
+ self.random_jitter_range = random_jitter_range
+ super(LinearRetry, self).__init__(
+ retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+ def get_backoff_time(self, settings):
+ """
+ Calculates how long to sleep before retrying.
+
+ :return:
+ An integer indicating how long to wait before retrying the request,
+ or None to indicate no retry should be performed.
+ :rtype: int or None
+ """
+ random_generator = random.Random()
+ # the backoff interval normally does not change, however there is the possibility
+ # that it was modified by accessing the property directly after initializing the object
+ random_range_start = self.backoff - self.random_jitter_range \
+ if self.backoff > self.random_jitter_range else 0
+ random_range_end = self.backoff + self.random_jitter_range
+ return random_generator.uniform(random_range_start, random_range_end)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/policies_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/policies_async.py
new file mode 100644
index 00000000000..e0926b81dbc
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/policies_async.py
@@ -0,0 +1,220 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+import asyncio
+import random
+import logging
+from typing import Any, TYPE_CHECKING
+
+from azure.core.pipeline.policies import AsyncHTTPPolicy
+from azure.core.exceptions import AzureError
+
+from .policies import is_retry, StorageRetryPolicy
+
+if TYPE_CHECKING:
+ from azure.core.pipeline import PipelineRequest, PipelineResponse
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+async def retry_hook(settings, **kwargs):
+ if settings['hook']:
+ if asyncio.iscoroutine(settings['hook']):
+ await settings['hook'](
+ retry_count=settings['count'] - 1,
+ location_mode=settings['mode'],
+ **kwargs)
+ else:
+ settings['hook'](
+ retry_count=settings['count'] - 1,
+ location_mode=settings['mode'],
+ **kwargs)
+
+
+class AsyncStorageResponseHook(AsyncHTTPPolicy):
+
+ def __init__(self, **kwargs): # pylint: disable=unused-argument
+ self._response_callback = kwargs.get('raw_response_hook')
+ super(AsyncStorageResponseHook, self).__init__()
+
+ async def send(self, request):
+ # type: (PipelineRequest) -> PipelineResponse
+ data_stream_total = request.context.get('data_stream_total') or \
+ request.context.options.pop('data_stream_total', None)
+ download_stream_current = request.context.get('download_stream_current') or \
+ request.context.options.pop('download_stream_current', None)
+ upload_stream_current = request.context.get('upload_stream_current') or \
+ request.context.options.pop('upload_stream_current', None)
+ response_callback = request.context.get('response_callback') or \
+ request.context.options.pop('raw_response_hook', self._response_callback)
+
+ response = await self.next.send(request)
+ await response.http_response.load_body()
+
+ will_retry = is_retry(response, request.context.options.get('mode'))
+ if not will_retry and download_stream_current is not None:
+ download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+ if data_stream_total is None:
+ content_range = response.http_response.headers.get('Content-Range')
+ if content_range:
+ data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+ else:
+ data_stream_total = download_stream_current
+ elif not will_retry and upload_stream_current is not None:
+ upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+ for pipeline_obj in [request, response]:
+ pipeline_obj.context['data_stream_total'] = data_stream_total
+ pipeline_obj.context['download_stream_current'] = download_stream_current
+ pipeline_obj.context['upload_stream_current'] = upload_stream_current
+ if response_callback:
+ if asyncio.iscoroutine(response_callback):
+ await response_callback(response)
+ else:
+ response_callback(response)
+ request.context['response_callback'] = response_callback
+ return response
+
+
+class AsyncStorageRetryPolicy(StorageRetryPolicy):
+ """
+ The base class for Exponential and Linear retries containing shared code.
+ """
+
+ async def sleep(self, settings, transport):
+ backoff = self.get_backoff_time(settings)
+ if not backoff or backoff < 0:
+ return
+ await transport.sleep(backoff)
+
+ async def send(self, request):
+ retries_remaining = True
+ response = None
+ retry_settings = self.configure_retries(request)
+ while retries_remaining:
+ try:
+ response = await self.next.send(request)
+ if is_retry(response, retry_settings['mode']):
+ retries_remaining = self.increment(
+ retry_settings,
+ request=request.http_request,
+ response=response.http_response)
+ if retries_remaining:
+ await retry_hook(
+ retry_settings,
+ request=request.http_request,
+ response=response.http_response,
+ error=None)
+ await self.sleep(retry_settings, request.context.transport)
+ continue
+ break
+ except AzureError as err:
+ retries_remaining = self.increment(
+ retry_settings, request=request.http_request, error=err)
+ if retries_remaining:
+ await retry_hook(
+ retry_settings,
+ request=request.http_request,
+ response=None,
+ error=err)
+ await self.sleep(retry_settings, request.context.transport)
+ continue
+ raise err
+ if retry_settings['history']:
+ response.context['history'] = retry_settings['history']
+ response.http_response.location_mode = retry_settings['mode']
+ return response
+
+
+class ExponentialRetry(AsyncStorageRetryPolicy):
+ """Exponential retry."""
+
+ def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+ retry_to_secondary=False, random_jitter_range=3, **kwargs):
+ '''
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds. For example, by default the first retry
+        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+        third after (15+3^2) = 24 seconds.
+
+ :param int initial_backoff:
+ The initial backoff interval, in seconds, for the first retry.
+ :param int increment_base:
+ The base, in seconds, to increment the initial_backoff by after the
+ first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+ '''
+ self.initial_backoff = initial_backoff
+ self.increment_base = increment_base
+ self.random_jitter_range = random_jitter_range
+ super(ExponentialRetry, self).__init__(
+ retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+ def get_backoff_time(self, settings):
+ """
+ Calculates how long to sleep before retrying.
+
+ :return:
+ An integer indicating how long to wait before retrying the request,
+ or None to indicate no retry should be performed.
+ :rtype: int or None
+ """
+ random_generator = random.Random()
+ backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+ random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+ random_range_end = backoff + self.random_jitter_range
+ return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(AsyncStorageRetryPolicy):
+ """Linear retry."""
+
+ def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+ """
+ Constructs a Linear retry object.
+
+ :param int backoff:
+ The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+ """
+ self.backoff = backoff
+ self.random_jitter_range = random_jitter_range
+ super(LinearRetry, self).__init__(
+ retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+ def get_backoff_time(self, settings):
+ """
+ Calculates how long to sleep before retrying.
+
+ :return:
+ An integer indicating how long to wait before retrying the request,
+ or None to indicate no retry should be performed.
+ :rtype: int or None
+ """
+ random_generator = random.Random()
+ # the backoff interval normally does not change, however there is the possibility
+ # that it was modified by accessing the property directly after initializing the object
+ random_range_start = self.backoff - self.random_jitter_range \
+ if self.backoff > self.random_jitter_range else 0
+ random_range_end = self.backoff + self.random_jitter_range
+ return random_generator.uniform(random_range_start, random_range_end)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/request_handlers.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/request_handlers.py
new file mode 100644
index 00000000000..4f15b65a4b6
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/request_handlers.py
@@ -0,0 +1,147 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+ TYPE_CHECKING
+)
+
+import logging
+from os import fstat
+from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
+
+import isodate
+
+from azure.core.exceptions import raise_with_traceback
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def serialize_iso(attr):
+ """Serialize Datetime object into ISO-8601 formatted string.
+
+ :param Datetime attr: Object to be serialized.
+ :rtype: str
+ :raises: ValueError if format invalid.
+ """
+ if not attr:
+ return None
+ if isinstance(attr, str):
+ attr = isodate.parse_datetime(attr)
+ try:
+ utc = attr.utctimetuple()
+ if utc.tm_year > 9999 or utc.tm_year < 1:
+ raise OverflowError("Hit max or min date")
+
+ date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
+ utc.tm_year, utc.tm_mon, utc.tm_mday,
+ utc.tm_hour, utc.tm_min, utc.tm_sec)
+ return date + 'Z'
+ except (ValueError, OverflowError) as err:
+ msg = "Unable to serialize datetime object."
+ raise_with_traceback(ValueError, msg, err)
+ except AttributeError as err:
+ msg = "ISO-8601 object must be valid Datetime object."
+ raise_with_traceback(TypeError, msg, err)
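+
+# Example (illustrative sketch): both datetime objects and ISO-8601 strings
+# are accepted.
+#   serialize_iso(datetime.datetime(2021, 5, 20, 8, 30, 0))  -> '2021-05-20T08:30:00Z'
+#   serialize_iso('2021-05-20T08:30:00Z')                     -> '2021-05-20T08:30:00Z'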
+
+
+def get_length(data):
+ length = None
+ # Check if object implements the __len__ method, covers most input cases such as bytearray.
+ try:
+ length = len(data)
+ except: # pylint: disable=bare-except
+ pass
+
+ if not length:
+ # Check if the stream is a file-like stream object.
+ # If so, calculate the size using the file descriptor.
+ try:
+ fileno = data.fileno()
+ except (AttributeError, UnsupportedOperation):
+ pass
+ else:
+ try:
+ return fstat(fileno).st_size
+ except OSError:
+                # Not a valid file descriptor; it is possible that requests
+                # returned a socket number instead.
+ pass
+
+ # If the stream is seekable and tell() is implemented, calculate the stream size.
+ try:
+ current_position = data.tell()
+ data.seek(0, SEEK_END)
+ length = data.tell() - current_position
+ data.seek(current_position, SEEK_SET)
+ except (AttributeError, UnsupportedOperation):
+ pass
+
+ return length
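+
+# Example (illustrative sketch) of the code paths above:
+#   get_length(b"hello")                    -> 5     (len() is defined)
+#   get_length(io.BytesIO(b"hello world"))  -> 11    (seek/tell fallback)
+#   get_length(iter([b"a", b"b"]))          -> None  (length cannot be determined)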
+
+
+def read_length(data):
+ try:
+ if hasattr(data, 'read'):
+ read_data = b''
+ for chunk in iter(lambda: data.read(4096), b""):
+ read_data += chunk
+ return len(read_data), read_data
+ if hasattr(data, '__iter__'):
+ read_data = b''
+ for chunk in data:
+ read_data += chunk
+ return len(read_data), read_data
+ except: # pylint: disable=bare-except
+ pass
+ raise ValueError("Unable to calculate content length, please specify.")
+
+
+def validate_and_format_range_headers(
+ start_range, end_range, start_range_required=True,
+ end_range_required=True, check_content_md5=False, align_to_page=False):
+ # If end range is provided, start range must be provided
+ if (start_range_required or end_range is not None) and start_range is None:
+ raise ValueError("start_range value cannot be None.")
+ if end_range_required and end_range is None:
+ raise ValueError("end_range value cannot be None.")
+
+ # Page ranges must be 512 aligned
+ if align_to_page:
+ if start_range is not None and start_range % 512 != 0:
+ raise ValueError("Invalid page blob start_range: {0}. "
+ "The size must be aligned to a 512-byte boundary.".format(start_range))
+ if end_range is not None and end_range % 512 != 511:
+ raise ValueError("Invalid page blob end_range: {0}. "
+ "The size must be aligned to a 512-byte boundary.".format(end_range))
+
+ # Format based on whether end_range is present
+ range_header = None
+ if end_range is not None:
+ range_header = 'bytes={0}-{1}'.format(start_range, end_range)
+ elif start_range is not None:
+ range_header = "bytes={0}-".format(start_range)
+
+ # Content MD5 can only be provided for a complete range less than 4MB in size
+ range_validation = None
+ if check_content_md5:
+ if start_range is None or end_range is None:
+ raise ValueError("Both start and end range requied for MD5 content validation.")
+ if end_range - start_range > 4 * 1024 * 1024:
+ raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.")
+ range_validation = 'true'
+
+ return range_header, range_validation
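+
+# Example (illustrative sketch):
+#   validate_and_format_range_headers(0, 511)
+#   -> ('bytes=0-511', None)
+#   validate_and_format_range_headers(0, 511, check_content_md5=True)
+#   -> ('bytes=0-511', 'true')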
+
+
+def add_metadata_headers(metadata=None):
+ # type: (Optional[Dict[str, str]]) -> Dict[str, str]
+ headers = {}
+ if metadata:
+ for key, value in metadata.items():
+ headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value
+ return headers
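+
+# Example (illustrative sketch):
+#   add_metadata_headers({'project': 'demo', 'owner': ' alice '})
+#   -> {'x-ms-meta-project': 'demo', 'x-ms-meta-owner': 'alice'}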
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/response_handlers.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/response_handlers.py
new file mode 100644
index 00000000000..ac526e59416
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/response_handlers.py
@@ -0,0 +1,159 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+ TYPE_CHECKING
+)
+import logging
+
+from azure.core.pipeline.policies import ContentDecodePolicy
+from azure.core.exceptions import (
+ HttpResponseError,
+ ResourceNotFoundError,
+ ResourceModifiedError,
+ ResourceExistsError,
+ ClientAuthenticationError,
+ DecodeError)
+
+from .parser import _to_utc_datetime
+from .models import StorageErrorCode, UserDelegationKey, get_enum_value
+
+
+if TYPE_CHECKING:
+ from datetime import datetime
+ from azure.core.exceptions import AzureError
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class PartialBatchErrorException(HttpResponseError):
+ """There is a partial failure in batch operations.
+
+ :param str message: The message of the exception.
+ :param response: Server response to be deserialized.
+ :param list parts: A list of the parts in multipart response.
+ """
+
+ def __init__(self, message, response, parts):
+ self.parts = parts
+ super(PartialBatchErrorException, self).__init__(message=message, response=response)
+
+
+def parse_length_from_content_range(content_range):
+ '''
+ Parses the blob length from the content range header: bytes 1-3/65537
+ '''
+ if content_range is None:
+ return None
+
+ # First, split in space and take the second half: '1-3/65537'
+ # Next, split on slash and take the second half: '65537'
+ # Finally, convert to an int: 65537
+ return int(content_range.split(' ', 1)[1].split('/', 1)[1])
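+
+# Example (illustrative sketch):
+#   parse_length_from_content_range('bytes 1-3/65537')  -> 65537
+#   parse_length_from_content_range(None)               -> None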
+
+
+def normalize_headers(headers):
+ normalized = {}
+ for key, value in headers.items():
+ if key.startswith('x-ms-'):
+ key = key[5:]
+ normalized[key.lower().replace('-', '_')] = get_enum_value(value)
+ return normalized
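+
+# Example (illustrative sketch): only 'x-ms-' prefixed headers are stripped of
+# the prefix; every key is lower-cased with dashes turned into underscores.
+#   normalize_headers({'x-ms-request-id': 'abc', 'Content-Length': '42'})
+#   -> {'request_id': 'abc', 'content_length': '42'}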
+
+
+def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument
+ raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")}
+ return {k[10:]: v for k, v in raw_metadata.items()}
+
+
+def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument
+ return normalize_headers(response_headers)
+
+
+def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument
+ return normalize_headers(response_headers), deserialized
+
+
+def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument
+ return response.location_mode, deserialized
+
+
+def process_storage_error(storage_error):
+ raise_error = HttpResponseError
+ error_code = storage_error.response.headers.get('x-ms-error-code')
+ error_message = storage_error.message
+ additional_data = {}
+ try:
+ error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
+ if error_body:
+ for info in error_body.iter():
+ if info.tag.lower() == 'code':
+ error_code = info.text
+ elif info.tag.lower() == 'message':
+ error_message = info.text
+ else:
+ additional_data[info.tag] = info.text
+ except DecodeError:
+ pass
+
+ try:
+ if error_code:
+ error_code = StorageErrorCode(error_code)
+ if error_code in [StorageErrorCode.condition_not_met,
+ StorageErrorCode.blob_overwritten]:
+ raise_error = ResourceModifiedError
+ if error_code in [StorageErrorCode.invalid_authentication_info,
+ StorageErrorCode.authentication_failed]:
+ raise_error = ClientAuthenticationError
+ if error_code in [StorageErrorCode.resource_not_found,
+ StorageErrorCode.cannot_verify_copy_source,
+ StorageErrorCode.blob_not_found,
+ StorageErrorCode.queue_not_found,
+ StorageErrorCode.container_not_found,
+ StorageErrorCode.parent_not_found,
+ StorageErrorCode.share_not_found]:
+ raise_error = ResourceNotFoundError
+ if error_code in [StorageErrorCode.account_already_exists,
+ StorageErrorCode.account_being_created,
+ StorageErrorCode.resource_already_exists,
+ StorageErrorCode.resource_type_mismatch,
+ StorageErrorCode.blob_already_exists,
+ StorageErrorCode.queue_already_exists,
+ StorageErrorCode.container_already_exists,
+ StorageErrorCode.container_being_deleted,
+ StorageErrorCode.queue_being_deleted,
+ StorageErrorCode.share_already_exists,
+ StorageErrorCode.share_being_deleted]:
+ raise_error = ResourceExistsError
+ except ValueError:
+ # Got an unknown error code
+ pass
+
+ try:
+ error_message += "\nErrorCode:{}".format(error_code.value)
+ except AttributeError:
+ error_message += "\nErrorCode:{}".format(error_code)
+ for name, info in additional_data.items():
+ error_message += "\n{}:{}".format(name, info)
+
+ error = raise_error(message=error_message, response=storage_error.response)
+ error.error_code = error_code
+ error.additional_info = additional_data
+ raise error
+
+
+def parse_to_internal_user_delegation_key(service_user_delegation_key):
+ internal_user_delegation_key = UserDelegationKey()
+ internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid
+ internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid
+ internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start)
+ internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry)
+ internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service
+ internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version
+ internal_user_delegation_key.value = service_user_delegation_key.value
+ return internal_user_delegation_key
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/shared_access_signature.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/shared_access_signature.py
new file mode 100644
index 00000000000..07aad5ffa1c
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/shared_access_signature.py
@@ -0,0 +1,220 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from datetime import date
+
+from .parser import _str, _to_utc_datetime
+from .constants import X_MS_VERSION
+from . import sign_string, url_quote
+
+
+class QueryStringConstants(object):
+ SIGNED_SIGNATURE = 'sig'
+ SIGNED_PERMISSION = 'sp'
+ SIGNED_START = 'st'
+ SIGNED_EXPIRY = 'se'
+ SIGNED_RESOURCE = 'sr'
+ SIGNED_IDENTIFIER = 'si'
+ SIGNED_IP = 'sip'
+ SIGNED_PROTOCOL = 'spr'
+ SIGNED_VERSION = 'sv'
+ SIGNED_CACHE_CONTROL = 'rscc'
+ SIGNED_CONTENT_DISPOSITION = 'rscd'
+ SIGNED_CONTENT_ENCODING = 'rsce'
+ SIGNED_CONTENT_LANGUAGE = 'rscl'
+ SIGNED_CONTENT_TYPE = 'rsct'
+ START_PK = 'spk'
+ START_RK = 'srk'
+ END_PK = 'epk'
+ END_RK = 'erk'
+ SIGNED_RESOURCE_TYPES = 'srt'
+ SIGNED_SERVICES = 'ss'
+ SIGNED_OID = 'skoid'
+ SIGNED_TID = 'sktid'
+ SIGNED_KEY_START = 'skt'
+ SIGNED_KEY_EXPIRY = 'ske'
+ SIGNED_KEY_SERVICE = 'sks'
+ SIGNED_KEY_VERSION = 'skv'
+
+ # for ADLS
+ SIGNED_AUTHORIZED_OID = 'saoid'
+ SIGNED_UNAUTHORIZED_OID = 'suoid'
+ SIGNED_CORRELATION_ID = 'scid'
+ SIGNED_DIRECTORY_DEPTH = 'sdd'
+
+ @staticmethod
+ def to_list():
+ return [
+ QueryStringConstants.SIGNED_SIGNATURE,
+ QueryStringConstants.SIGNED_PERMISSION,
+ QueryStringConstants.SIGNED_START,
+ QueryStringConstants.SIGNED_EXPIRY,
+ QueryStringConstants.SIGNED_RESOURCE,
+ QueryStringConstants.SIGNED_IDENTIFIER,
+ QueryStringConstants.SIGNED_IP,
+ QueryStringConstants.SIGNED_PROTOCOL,
+ QueryStringConstants.SIGNED_VERSION,
+ QueryStringConstants.SIGNED_CACHE_CONTROL,
+ QueryStringConstants.SIGNED_CONTENT_DISPOSITION,
+ QueryStringConstants.SIGNED_CONTENT_ENCODING,
+ QueryStringConstants.SIGNED_CONTENT_LANGUAGE,
+ QueryStringConstants.SIGNED_CONTENT_TYPE,
+ QueryStringConstants.START_PK,
+ QueryStringConstants.START_RK,
+ QueryStringConstants.END_PK,
+ QueryStringConstants.END_RK,
+ QueryStringConstants.SIGNED_RESOURCE_TYPES,
+ QueryStringConstants.SIGNED_SERVICES,
+ QueryStringConstants.SIGNED_OID,
+ QueryStringConstants.SIGNED_TID,
+ QueryStringConstants.SIGNED_KEY_START,
+ QueryStringConstants.SIGNED_KEY_EXPIRY,
+ QueryStringConstants.SIGNED_KEY_SERVICE,
+ QueryStringConstants.SIGNED_KEY_VERSION,
+ # for ADLS
+ QueryStringConstants.SIGNED_AUTHORIZED_OID,
+ QueryStringConstants.SIGNED_UNAUTHORIZED_OID,
+ QueryStringConstants.SIGNED_CORRELATION_ID,
+ QueryStringConstants.SIGNED_DIRECTORY_DEPTH,
+ ]
+
+
+class SharedAccessSignature(object):
+ '''
+ Provides a factory for creating account access
+ signature tokens with an account name and account key. Users can either
+ use the factory or can construct the appropriate service and use the
+ generate_*_shared_access_signature method directly.
+ '''
+
+ def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
+ '''
+ :param str account_name:
+ The storage account name used to generate the shared access signatures.
+ :param str account_key:
+            The access key used to generate the shared access signatures.
+ :param str x_ms_version:
+ The service version used to generate the shared access signatures.
+ '''
+ self.account_name = account_name
+ self.account_key = account_key
+ self.x_ms_version = x_ms_version
+
+ def generate_account(self, services, resource_types, permission, expiry, start=None,
+ ip=None, protocol=None):
+ '''
+ Generates a shared access signature for the account.
+ Use the returned signature with the sas_token parameter of the service
+ or to create a new account object.
+
+ :param ResourceTypes resource_types:
+ Specifies the resource types that are accessible with the account
+ SAS. You can combine values to provide access to more than one
+ resource type.
+ :param AccountSasPermissions permission:
+ The permissions associated with the shared access signature. The
+ user is restricted to operations allowed by the permissions.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has been
+ specified in an associated stored access policy. You can combine
+ values to provide more than one permission.
+ :param expiry:
+ The time at which the shared access signature becomes invalid.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has
+ been specified in an associated stored access policy. Azure will always
+ convert values to UTC. If a date is passed in without timezone info, it
+ is assumed to be UTC.
+ :type expiry: datetime or str
+ :param start:
+ The time at which the shared access signature becomes valid. If
+ omitted, start time for this call is assumed to be the time when the
+ storage service receives the request. Azure will always convert values
+ to UTC. If a date is passed in without timezone info, it is assumed to
+ be UTC.
+ :type start: datetime or str
+ :param str ip:
+ Specifies an IP address or a range of IP addresses from which to accept requests.
+ If the IP address from which the request originates does not match the IP address
+ or address range specified on the SAS token, the request is not authenticated.
+ For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+ restricts the request to those IP addresses.
+ :param str protocol:
+ Specifies the protocol permitted for a request made. The default value
+ is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+ '''
+ sas = _SharedAccessHelper()
+ sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
+ sas.add_account(services, resource_types)
+ sas.add_account_signature(self.account_name, self.account_key)
+
+ return sas.get_token()
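+
+# Example (illustrative sketch, using placeholder credentials and the
+# ResourceTypes / Services / AccountSasPermissions models from this package):
+#
+#   sas = SharedAccessSignature('myaccount', '<account-key>')
+#   token = sas.generate_account(
+#       services=Services(blob=True),
+#       resource_types=ResourceTypes.from_string('sco'),
+#       permission=AccountSasPermissions(read=True, list=True),
+#       expiry='2021-06-01T00:00:00Z')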
+
+
+class _SharedAccessHelper(object):
+ def __init__(self):
+ self.query_dict = {}
+
+ def _add_query(self, name, val):
+ if val:
+ self.query_dict[name] = _str(val) if val is not None else None
+
+ def add_base(self, permission, expiry, start, ip, protocol, x_ms_version):
+ if isinstance(start, date):
+ start = _to_utc_datetime(start)
+
+ if isinstance(expiry, date):
+ expiry = _to_utc_datetime(expiry)
+
+ self._add_query(QueryStringConstants.SIGNED_START, start)
+ self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry)
+ self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission)
+ self._add_query(QueryStringConstants.SIGNED_IP, ip)
+ self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol)
+ self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version)
+
+ def add_resource(self, resource):
+ self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource)
+
+ def add_id(self, policy_id):
+ self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id)
+
+ def add_account(self, services, resource_types):
+ self._add_query(QueryStringConstants.SIGNED_SERVICES, services)
+ self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types)
+
+ def add_override_response_headers(self, cache_control,
+ content_disposition,
+ content_encoding,
+ content_language,
+ content_type):
+ self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control)
+ self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition)
+ self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding)
+ self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language)
+ self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type)
+
+ def add_account_signature(self, account_name, account_key):
+ def get_value_to_append(query):
+ return_value = self.query_dict.get(query) or ''
+ return return_value + '\n'
+
+ string_to_sign = \
+ (account_name + '\n' +
+ get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
+ get_value_to_append(QueryStringConstants.SIGNED_SERVICES) +
+ get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) +
+ get_value_to_append(QueryStringConstants.SIGNED_START) +
+ get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
+ get_value_to_append(QueryStringConstants.SIGNED_IP) +
+ get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
+ get_value_to_append(QueryStringConstants.SIGNED_VERSION))
+
+ self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
+ sign_string(account_key, string_to_sign))
+
+ def get_token(self):
+ return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None])
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/uploads.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/uploads.py
new file mode 100644
index 00000000000..abf3fb2ce00
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/uploads.py
@@ -0,0 +1,550 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+from concurrent import futures
+from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
+from threading import Lock
+from itertools import islice
+from math import ceil
+
+import six
+
+from azure.core.tracing.common import with_current_context
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+from .encryption import get_blob_encryptor_and_padder
+
+
+_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
+_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object."
+
+
+def _parallel_uploads(executor, uploader, pending, running):
+ range_ids = []
+ while True:
+        # Wait for an in-flight upload to finish before adding a new one
+ done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
+ range_ids.extend([chunk.result() for chunk in done])
+ try:
+ for _ in range(0, len(done)):
+ next_chunk = next(pending)
+ running.add(executor.submit(with_current_context(uploader), next_chunk))
+ except StopIteration:
+ break
+
+ # Wait for the remaining uploads to finish
+ done, _running = futures.wait(running)
+ range_ids.extend([chunk.result() for chunk in done])
+ return range_ids
+
+
+def upload_data_chunks(
+ service=None,
+ uploader_class=None,
+ total_size=None,
+ chunk_size=None,
+ max_concurrency=None,
+ stream=None,
+ validate_content=None,
+ encryption_options=None,
+ **kwargs):
+
+ if encryption_options:
+ encryptor, padder = get_blob_encryptor_and_padder(
+ encryption_options.get('cek'),
+ encryption_options.get('vector'),
+ uploader_class is not PageBlobChunkUploader)
+ kwargs['encryptor'] = encryptor
+ kwargs['padder'] = padder
+
+ parallel = max_concurrency > 1
+ if parallel and 'modified_access_conditions' in kwargs:
+ # Access conditions do not work with parallelism
+ kwargs['modified_access_conditions'] = None
+
+ uploader = uploader_class(
+ service=service,
+ total_size=total_size,
+ chunk_size=chunk_size,
+ stream=stream,
+ parallel=parallel,
+ validate_content=validate_content,
+ **kwargs)
+ if parallel:
+ executor = futures.ThreadPoolExecutor(max_concurrency)
+ upload_tasks = uploader.get_chunk_streams()
+ running_futures = [
+ executor.submit(with_current_context(uploader.process_chunk), u)
+ for u in islice(upload_tasks, 0, max_concurrency)
+ ]
+ range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
+ else:
+ range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
+ if any(range_ids):
+ return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+ return uploader.response_headers
+
+
+def upload_substream_blocks(
+ service=None,
+ uploader_class=None,
+ total_size=None,
+ chunk_size=None,
+ max_concurrency=None,
+ stream=None,
+ **kwargs):
+ parallel = max_concurrency > 1
+ if parallel and 'modified_access_conditions' in kwargs:
+ # Access conditions do not work with parallelism
+ kwargs['modified_access_conditions'] = None
+ uploader = uploader_class(
+ service=service,
+ total_size=total_size,
+ chunk_size=chunk_size,
+ stream=stream,
+ parallel=parallel,
+ **kwargs)
+
+ if parallel:
+ executor = futures.ThreadPoolExecutor(max_concurrency)
+ upload_tasks = uploader.get_substream_blocks()
+ running_futures = [
+ executor.submit(with_current_context(uploader.process_substream_block), u)
+ for u in islice(upload_tasks, 0, max_concurrency)
+ ]
+ range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures)
+ else:
+ range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()]
+ return sorted(range_ids)
+
+
+class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes
+
+ def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs):
+ self.service = service
+ self.total_size = total_size
+ self.chunk_size = chunk_size
+ self.stream = stream
+ self.parallel = parallel
+
+ # Stream management
+ self.stream_start = stream.tell() if parallel else None
+ self.stream_lock = Lock() if parallel else None
+
+ # Progress feedback
+ self.progress_total = 0
+ self.progress_lock = Lock() if parallel else None
+
+ # Encryption
+ self.encryptor = encryptor
+ self.padder = padder
+ self.response_headers = None
+ self.etag = None
+ self.last_modified = None
+ self.request_options = kwargs
+
+ def get_chunk_streams(self):
+ index = 0
+ while True:
+ data = b""
+ read_size = self.chunk_size
+
+ # Buffer until we either reach the end of the stream or get a whole chunk.
+ while True:
+ if self.total_size:
+ read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
+ temp = self.stream.read(read_size)
+ if not isinstance(temp, six.binary_type):
+ raise TypeError("Blob data should be of type bytes.")
+ data += temp or b""
+
+ # We have read an empty string and so are at the end
+ # of the buffer or we have read a full chunk.
+ if temp == b"" or len(data) == self.chunk_size:
+ break
+
+ if len(data) == self.chunk_size:
+ if self.padder:
+ data = self.padder.update(data)
+ if self.encryptor:
+ data = self.encryptor.update(data)
+ yield index, data
+ else:
+ if self.padder:
+ data = self.padder.update(data) + self.padder.finalize()
+ if self.encryptor:
+ data = self.encryptor.update(data) + self.encryptor.finalize()
+ if data:
+ yield index, data
+ break
+ index += len(data)
+
+ def process_chunk(self, chunk_data):
+ chunk_bytes = chunk_data[1]
+ chunk_offset = chunk_data[0]
+ return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
+
+ def _update_progress(self, length):
+ if self.progress_lock is not None:
+ with self.progress_lock:
+ self.progress_total += length
+ else:
+ self.progress_total += length
+
+ def _upload_chunk(self, chunk_offset, chunk_data):
+ raise NotImplementedError("Must be implemented by child class.")
+
+ def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
+ range_id = self._upload_chunk(chunk_offset, chunk_data)
+ self._update_progress(len(chunk_data))
+ return range_id
+
+ def get_substream_blocks(self):
+ assert self.chunk_size is not None
+ lock = self.stream_lock
+ blob_length = self.total_size
+
+ if blob_length is None:
+ blob_length = get_length(self.stream)
+ if blob_length is None:
+ raise ValueError("Unable to determine content length of upload data.")
+
+ blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
+ last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
+
+ for i in range(blocks):
+ index = i * self.chunk_size
+ length = last_block_size if i == blocks - 1 else self.chunk_size
+ yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock))
+
+ def process_substream_block(self, block_data):
+ return self._upload_substream_block_with_progress(block_data[0], block_data[1])
+
+ def _upload_substream_block(self, block_id, block_stream):
+ raise NotImplementedError("Must be implemented by child class.")
+
+ def _upload_substream_block_with_progress(self, block_id, block_stream):
+ range_id = self._upload_substream_block(block_id, block_stream)
+ self._update_progress(len(block_stream))
+ return range_id
+
+ def set_response_properties(self, resp):
+ self.etag = resp.etag
+ self.last_modified = resp.last_modified
+
+
+class BlockBlobChunkUploader(_ChunkUploader):
+
+ def __init__(self, *args, **kwargs):
+ kwargs.pop("modified_access_conditions", None)
+ super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
+ self.current_length = None
+
+ def _upload_chunk(self, chunk_offset, chunk_data):
+ # TODO: This is incorrect, but works with recording.
+ index = '{0:032d}'.format(chunk_offset)
+ block_id = encode_base64(url_quote(encode_base64(index)))
+ self.service.stage_block(
+ block_id,
+ len(chunk_data),
+ chunk_data,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+ return index, block_id
+
+ def _upload_substream_block(self, block_id, block_stream):
+ try:
+ self.service.stage_block(
+ block_id,
+ len(block_stream),
+ block_stream,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+ finally:
+ block_stream.close()
+ return block_id
+
+
+class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
+
+ def _is_chunk_empty(self, chunk_data):
+ # read until non-zero byte is encountered
+ # if reached the end without returning, then chunk_data is all 0's
+ return not any(bytearray(chunk_data))
+
+ def _upload_chunk(self, chunk_offset, chunk_data):
+ # avoid uploading the empty pages
+ if not self._is_chunk_empty(chunk_data):
+ chunk_end = chunk_offset + len(chunk_data) - 1
+ content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end)
+ computed_md5 = None
+ self.response_headers = self.service.upload_pages(
+ chunk_data,
+ content_length=len(chunk_data),
+ transactional_content_md5=computed_md5,
+ range=content_range,
+ cls=return_response_headers,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+
+ if not self.parallel and self.request_options.get('modified_access_conditions'):
+ self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+
+class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
+
+ def __init__(self, *args, **kwargs):
+ super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
+ self.current_length = None
+
+ def _upload_chunk(self, chunk_offset, chunk_data):
+ if self.current_length is None:
+ self.response_headers = self.service.append_block(
+ chunk_data,
+ content_length=len(chunk_data),
+ cls=return_response_headers,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+ self.current_length = int(self.response_headers["blob_append_offset"])
+ else:
+ self.request_options['append_position_access_conditions'].append_position = \
+ self.current_length + chunk_offset
+ self.response_headers = self.service.append_block(
+ chunk_data,
+ content_length=len(chunk_data),
+ cls=return_response_headers,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+
+
+class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
+
+ def _upload_chunk(self, chunk_offset, chunk_data):
+ length = len(chunk_data)
+ chunk_end = chunk_offset + length - 1
+ response = self.service.upload_range(
+ chunk_data,
+ chunk_offset,
+ length,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+ return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response
+
+
+class SubStream(IOBase):
+
+ def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
+ # Python 2.7: file-like objects created with open() typically support seek(), but are not
+ # derivations of io.IOBase and thus do not implement seekable().
+ # Python > 3.0: file-like objects created with open() are derived from io.IOBase.
+ try:
+ # only the main thread runs this, so there's no need grabbing the lock
+ wrapped_stream.seek(0, SEEK_CUR)
+ except:
+ raise ValueError("Wrapped stream must support seek().")
+
+ self._lock = lockObj
+ self._wrapped_stream = wrapped_stream
+ self._position = 0
+ self._stream_begin_index = stream_begin_index
+ self._length = length
+ self._buffer = BytesIO()
+
+ # we must avoid buffering more than necessary, and also not use up too much memory
+ # so the max buffer size is capped at 4MB
+ self._max_buffer_size = (
+ length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
+ )
+ self._current_buffer_start = 0
+ self._current_buffer_size = 0
+ super(SubStream, self).__init__()
+
+ def __len__(self):
+ return self._length
+
+ def close(self):
+ if self._buffer:
+ self._buffer.close()
+ self._wrapped_stream = None
+ IOBase.close(self)
+
+ def fileno(self):
+ return self._wrapped_stream.fileno()
+
+ def flush(self):
+ pass
+
+ def read(self, size=None):
+ if self.closed: # pylint: disable=using-constant-test
+ raise ValueError("Stream is closed.")
+
+ if size is None:
+ size = self._length - self._position
+
+ # adjust if out of bounds
+ if size + self._position >= self._length:
+ size = self._length - self._position
+
+ # return fast
+ if size == 0 or self._buffer.closed:
+ return b""
+
+ # attempt first read from the read buffer and update position
+ read_buffer = self._buffer.read(size)
+ bytes_read = len(read_buffer)
+ bytes_remaining = size - bytes_read
+ self._position += bytes_read
+
+ # repopulate the read buffer from the underlying stream to fulfill the request
+ # ensure the seek and read operations are done atomically (only if a lock is provided)
+ if bytes_remaining > 0:
+ with self._buffer:
+ # either read in the max buffer size specified on the class
+ # or read in just enough data for the current block/sub stream
+ current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
+
+ # lock is only defined if max_concurrency > 1 (parallel uploads)
+ if self._lock:
+ with self._lock:
+ # reposition the underlying stream to match the start of the data to read
+ absolute_position = self._stream_begin_index + self._position
+ self._wrapped_stream.seek(absolute_position, SEEK_SET)
+ # If we can't seek to the right location, our read will be corrupted so fail fast.
+ if self._wrapped_stream.tell() != absolute_position:
+ raise IOError("Stream failed to seek to the desired location.")
+ buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+ else:
+ buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+
+ if buffer_from_stream:
+ # update the buffer with new data from the wrapped stream
+ # we need to note down the start position and size of the buffer, in case seek is performed later
+ self._buffer = BytesIO(buffer_from_stream)
+ self._current_buffer_start = self._position
+ self._current_buffer_size = len(buffer_from_stream)
+
+ # read the remaining bytes from the new buffer and update position
+ second_read_buffer = self._buffer.read(bytes_remaining)
+ read_buffer += second_read_buffer
+ self._position += len(second_read_buffer)
+
+ return read_buffer
+
+ def readable(self):
+ return True
+
+ def readinto(self, b):
+ raise UnsupportedOperation
+
+ def seek(self, offset, whence=0):
+ if whence is SEEK_SET:
+ start_index = 0
+ elif whence is SEEK_CUR:
+ start_index = self._position
+ elif whence is SEEK_END:
+ start_index = self._length
+ offset = -offset
+ else:
+ raise ValueError("Invalid argument for the 'whence' parameter.")
+
+ pos = start_index + offset
+
+ if pos > self._length:
+ pos = self._length
+ elif pos < 0:
+ pos = 0
+
+ # check if buffer is still valid
+ # if not, drop buffer
+ if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
+ self._buffer.close()
+ self._buffer = BytesIO()
+ else: # if yes seek to correct position
+ delta = pos - self._current_buffer_start
+ self._buffer.seek(delta, SEEK_SET)
+
+ self._position = pos
+ return pos
+
+ def seekable(self):
+ return True
+
+ def tell(self):
+ return self._position
+
+ def write(self):
+ raise UnsupportedOperation
+
+ def writelines(self):
+ raise UnsupportedOperation
+
+    def writable(self):
+ return False
+
+
+class IterStreamer(object):
+ """
+ File-like streaming iterator.
+ """
+
+ def __init__(self, generator, encoding="UTF-8"):
+ self.generator = generator
+ self.iterator = iter(generator)
+ self.leftover = b""
+ self.encoding = encoding
+
+ def __len__(self):
+ return self.generator.__len__()
+
+ def __iter__(self):
+ return self.iterator
+
+ def seekable(self):
+ return False
+
+ def __next__(self):
+ return next(self.iterator)
+
+ next = __next__ # Python 2 compatibility.
+
+ def tell(self, *args, **kwargs):
+ raise UnsupportedOperation("Data generator does not support tell.")
+
+ def seek(self, *args, **kwargs):
+ raise UnsupportedOperation("Data generator is unseekable.")
+
+ def read(self, size):
+ data = self.leftover
+ count = len(self.leftover)
+ try:
+ while count < size:
+ chunk = self.__next__()
+ if isinstance(chunk, six.text_type):
+ chunk = chunk.encode(self.encoding)
+ data += chunk
+ count += len(chunk)
+ except StopIteration:
+ pass
+
+ if count > size:
+ self.leftover = data[size:]
+
+ return data[:size]
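
`uploads.py` above implements chunked uploads with a bounded window of in-flight work: `get_chunk_streams`/`get_substream_blocks` slice the source (for example, a 10 MiB stream with 4 MiB chunks yields blocks of 4, 4 and 2 MiB), and `_parallel_uploads` keeps at most `max_concurrency` futures running, submitting a replacement each time one completes. A minimal, self-contained sketch of that scheduling pattern (names such as `bounded_parallel_upload` are illustrative, not part of the SDK):

```
# Standalone sketch of the bounded-concurrency pattern used by _parallel_uploads:
# keep at most `max_concurrency` futures running, feeding in a new work item
# whenever one finishes.
from concurrent import futures
from itertools import islice

def upload_chunk(chunk):            # stand-in for uploader.process_chunk
    index, data = chunk
    return index, len(data)         # pretend "range id"

def bounded_parallel_upload(chunks, max_concurrency=4):
    chunks = iter(chunks)
    results = []
    with futures.ThreadPoolExecutor(max_concurrency) as executor:
        running = {executor.submit(upload_chunk, c)
                   for c in islice(chunks, max_concurrency)}
        while running:
            done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
            results.extend(f.result() for f in done)
            # refill the window with as many new chunks as just completed
            for c in islice(chunks, len(done)):
                running.add(executor.submit(upload_chunk, c))
    return [r for _, r in sorted(results)]

print(bounded_parallel_upload(enumerate([b"a" * 10, b"b" * 7, b"c" * 3])))
```
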
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/uploads_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/uploads_async.py
new file mode 100644
index 00000000000..fe68a2b5533
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared/uploads_async.py
@@ -0,0 +1,350 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+import asyncio
+from asyncio import Lock
+from itertools import islice
+import threading
+
+from math import ceil
+
+import six
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+from .encryption import get_blob_encryptor_and_padder
+from .uploads import SubStream, IterStreamer # pylint: disable=unused-import
+
+
+_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
+_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
+
+
+async def _parallel_uploads(uploader, pending, running):
+ range_ids = []
+ while True:
+        # Wait for an in-flight upload to finish before adding a new one
+ done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
+ range_ids.extend([chunk.result() for chunk in done])
+ try:
+ for _ in range(0, len(done)):
+ next_chunk = next(pending)
+ running.add(asyncio.ensure_future(uploader(next_chunk)))
+ except StopIteration:
+ break
+
+ # Wait for the remaining uploads to finish
+ if running:
+ done, _running = await asyncio.wait(running)
+ range_ids.extend([chunk.result() for chunk in done])
+ return range_ids
+
+
+async def upload_data_chunks(
+ service=None,
+ uploader_class=None,
+ total_size=None,
+ chunk_size=None,
+ max_concurrency=None,
+ stream=None,
+ encryption_options=None,
+ **kwargs):
+
+ if encryption_options:
+ encryptor, padder = get_blob_encryptor_and_padder(
+ encryption_options.get('cek'),
+ encryption_options.get('vector'),
+ uploader_class is not PageBlobChunkUploader)
+ kwargs['encryptor'] = encryptor
+ kwargs['padder'] = padder
+
+ parallel = max_concurrency > 1
+ if parallel and 'modified_access_conditions' in kwargs:
+ # Access conditions do not work with parallelism
+ kwargs['modified_access_conditions'] = None
+
+ uploader = uploader_class(
+ service=service,
+ total_size=total_size,
+ chunk_size=chunk_size,
+ stream=stream,
+ parallel=parallel,
+ **kwargs)
+
+ if parallel:
+ upload_tasks = uploader.get_chunk_streams()
+ running_futures = [
+ asyncio.ensure_future(uploader.process_chunk(u))
+ for u in islice(upload_tasks, 0, max_concurrency)
+ ]
+ range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures)
+ else:
+ range_ids = []
+ for chunk in uploader.get_chunk_streams():
+ range_ids.append(await uploader.process_chunk(chunk))
+
+ if any(range_ids):
+ return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+ return uploader.response_headers
+
+
+async def upload_substream_blocks(
+ service=None,
+ uploader_class=None,
+ total_size=None,
+ chunk_size=None,
+ max_concurrency=None,
+ stream=None,
+ **kwargs):
+ parallel = max_concurrency > 1
+ if parallel and 'modified_access_conditions' in kwargs:
+ # Access conditions do not work with parallelism
+ kwargs['modified_access_conditions'] = None
+ uploader = uploader_class(
+ service=service,
+ total_size=total_size,
+ chunk_size=chunk_size,
+ stream=stream,
+ parallel=parallel,
+ **kwargs)
+
+ if parallel:
+ upload_tasks = uploader.get_substream_blocks()
+ running_futures = [
+ asyncio.ensure_future(uploader.process_substream_block(u))
+ for u in islice(upload_tasks, 0, max_concurrency)
+ ]
+ range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures)
+ else:
+ range_ids = []
+ for block in uploader.get_substream_blocks():
+ range_ids.append(await uploader.process_substream_block(block))
+ return sorted(range_ids)
+
+
+class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes
+
+ def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs):
+ self.service = service
+ self.total_size = total_size
+ self.chunk_size = chunk_size
+ self.stream = stream
+ self.parallel = parallel
+
+ # Stream management
+ self.stream_start = stream.tell() if parallel else None
+ self.stream_lock = threading.Lock() if parallel else None
+
+ # Progress feedback
+ self.progress_total = 0
+ self.progress_lock = Lock() if parallel else None
+
+ # Encryption
+ self.encryptor = encryptor
+ self.padder = padder
+ self.response_headers = None
+ self.etag = None
+ self.last_modified = None
+ self.request_options = kwargs
+
+ def get_chunk_streams(self):
+ index = 0
+ while True:
+ data = b''
+ read_size = self.chunk_size
+
+ # Buffer until we either reach the end of the stream or get a whole chunk.
+ while True:
+ if self.total_size:
+ read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
+ temp = self.stream.read(read_size)
+ if not isinstance(temp, six.binary_type):
+ raise TypeError('Blob data should be of type bytes.')
+ data += temp or b""
+
+ # We have read an empty string and so are at the end
+ # of the buffer or we have read a full chunk.
+ if temp == b'' or len(data) == self.chunk_size:
+ break
+
+ if len(data) == self.chunk_size:
+ if self.padder:
+ data = self.padder.update(data)
+ if self.encryptor:
+ data = self.encryptor.update(data)
+ yield index, data
+ else:
+ if self.padder:
+ data = self.padder.update(data) + self.padder.finalize()
+ if self.encryptor:
+ data = self.encryptor.update(data) + self.encryptor.finalize()
+ if data:
+ yield index, data
+ break
+ index += len(data)
+
+ async def process_chunk(self, chunk_data):
+ chunk_bytes = chunk_data[1]
+ chunk_offset = chunk_data[0]
+ return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
+
+ async def _update_progress(self, length):
+ if self.progress_lock is not None:
+ async with self.progress_lock:
+ self.progress_total += length
+ else:
+ self.progress_total += length
+
+ async def _upload_chunk(self, chunk_offset, chunk_data):
+ raise NotImplementedError("Must be implemented by child class.")
+
+ async def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
+ range_id = await self._upload_chunk(chunk_offset, chunk_data)
+ await self._update_progress(len(chunk_data))
+ return range_id
+
+ def get_substream_blocks(self):
+ assert self.chunk_size is not None
+ lock = self.stream_lock
+ blob_length = self.total_size
+
+ if blob_length is None:
+ blob_length = get_length(self.stream)
+ if blob_length is None:
+ raise ValueError("Unable to determine content length of upload data.")
+
+ blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
+ last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
+
+ for i in range(blocks):
+ index = i * self.chunk_size
+ length = last_block_size if i == blocks - 1 else self.chunk_size
+ yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock))
+
+ async def process_substream_block(self, block_data):
+ return await self._upload_substream_block_with_progress(block_data[0], block_data[1])
+
+ async def _upload_substream_block(self, block_id, block_stream):
+ raise NotImplementedError("Must be implemented by child class.")
+
+ async def _upload_substream_block_with_progress(self, block_id, block_stream):
+ range_id = await self._upload_substream_block(block_id, block_stream)
+ await self._update_progress(len(block_stream))
+ return range_id
+
+ def set_response_properties(self, resp):
+ self.etag = resp.etag
+ self.last_modified = resp.last_modified
+
+
+class BlockBlobChunkUploader(_ChunkUploader):
+
+ def __init__(self, *args, **kwargs):
+ kwargs.pop('modified_access_conditions', None)
+ super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
+ self.current_length = None
+
+ async def _upload_chunk(self, chunk_offset, chunk_data):
+ # TODO: This is incorrect, but works with recording.
+ index = '{0:032d}'.format(chunk_offset)
+ block_id = encode_base64(url_quote(encode_base64(index)))
+ await self.service.stage_block(
+ block_id,
+ len(chunk_data),
+ chunk_data,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options)
+ return index, block_id
+
+ async def _upload_substream_block(self, block_id, block_stream):
+ try:
+ await self.service.stage_block(
+ block_id,
+ len(block_stream),
+ block_stream,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options)
+ finally:
+ block_stream.close()
+ return block_id
+
+
+class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
+
+ def _is_chunk_empty(self, chunk_data):
+ # read until non-zero byte is encountered
+ # if reached the end without returning, then chunk_data is all 0's
+ for each_byte in chunk_data:
+ if each_byte not in [0, b'\x00']:
+ return False
+ return True
+
+ async def _upload_chunk(self, chunk_offset, chunk_data):
+ # avoid uploading the empty pages
+ if not self._is_chunk_empty(chunk_data):
+ chunk_end = chunk_offset + len(chunk_data) - 1
+ content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end)
+ computed_md5 = None
+ self.response_headers = await self.service.upload_pages(
+ chunk_data,
+ content_length=len(chunk_data),
+ transactional_content_md5=computed_md5,
+ range=content_range,
+ cls=return_response_headers,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options)
+
+ if not self.parallel and self.request_options.get('modified_access_conditions'):
+ self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+
+class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
+
+ def __init__(self, *args, **kwargs):
+ super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
+ self.current_length = None
+
+ async def _upload_chunk(self, chunk_offset, chunk_data):
+ if self.current_length is None:
+ self.response_headers = await self.service.append_block(
+ chunk_data,
+ content_length=len(chunk_data),
+ cls=return_response_headers,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options)
+ self.current_length = int(self.response_headers['blob_append_offset'])
+ else:
+ self.request_options['append_position_access_conditions'].append_position = \
+ self.current_length + chunk_offset
+ self.response_headers = await self.service.append_block(
+ chunk_data,
+ content_length=len(chunk_data),
+ cls=return_response_headers,
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options)
+
+
+class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
+
+ async def _upload_chunk(self, chunk_offset, chunk_data):
+        chunk_end = chunk_offset + len(chunk_data) - 1
+        response = await self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            len(chunk_data),
+ data_stream_total=self.total_size,
+ upload_stream_current=self.progress_total,
+ **self.request_options
+ )
+ range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end)
+ return range_id, response
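
`uploads_async.py` mirrors the synchronous module with coroutines: chunk uploads are scheduled with `asyncio.ensure_future` and the in-flight window is refilled via `asyncio.wait(..., return_when=FIRST_COMPLETED)`. A compact asyncio sketch of the same idea, with illustrative names only:

```
import asyncio
from itertools import islice

async def upload_chunk(chunk):       # stand-in for uploader.process_chunk
    index, data = chunk
    await asyncio.sleep(0)            # pretend network call
    return index, len(data)

async def bounded_parallel_upload(chunks, max_concurrency=4):
    chunks = iter(chunks)
    results = []
    running = {asyncio.ensure_future(upload_chunk(c))
               for c in islice(chunks, max_concurrency)}
    while running:
        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
        results.extend(t.result() for t in done)
        for c in islice(chunks, len(done)):
            running.add(asyncio.ensure_future(upload_chunk(c)))
    return [r for _, r in sorted(results)]

print(asyncio.run(bounded_parallel_upload(enumerate([b"a" * 10, b"b" * 7, b"c" * 3]))))
```
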
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared_access_signature.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared_access_signature.py
new file mode 100644
index 00000000000..16126da8d06
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_shared_access_signature.py
@@ -0,0 +1,596 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, TYPE_CHECKING
+)
+
+from ._shared import sign_string, url_quote
+from ._shared.constants import X_MS_VERSION
+from ._shared.models import Services
+from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, \
+ QueryStringConstants
+
+if TYPE_CHECKING:
+ from datetime import datetime
+ from . import (
+ ResourceTypes,
+ AccountSasPermissions,
+ UserDelegationKey,
+ ContainerSasPermissions,
+ BlobSasPermissions
+ )
+
+
+class BlobQueryStringConstants(object):
+ SIGNED_TIMESTAMP = 'snapshot'
+
+
+class BlobSharedAccessSignature(SharedAccessSignature):
+ '''
+ Provides a factory for creating blob and container access
+ signature tokens with a common account name and account key. Users can either
+ use the factory or can construct the appropriate service and use the
+ generate_*_shared_access_signature method directly.
+ '''
+
+ def __init__(self, account_name, account_key=None, user_delegation_key=None):
+ '''
+ :param str account_name:
+ The storage account name used to generate the shared access signatures.
+ :param str account_key:
+            The access key to generate the shared access signatures.
+ :param ~.models.UserDelegationKey user_delegation_key:
+ Instead of an account key, the user could pass in a user delegation key.
+ A user delegation key can be obtained from the service by authenticating with an AAD identity;
+ this can be accomplished by calling get_user_delegation_key on any Blob service object.
+ '''
+ super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
+ self.user_delegation_key = user_delegation_key
+
+ def generate_blob(self, container_name, blob_name, snapshot=None, version_id=None, permission=None,
+ expiry=None, start=None, policy_id=None, ip=None, protocol=None,
+ cache_control=None, content_disposition=None,
+ content_encoding=None, content_language=None,
+ content_type=None, **kwargs):
+ '''
+ Generates a shared access signature for the blob or one of its snapshots.
+ Use the returned signature with the sas_token parameter of any BlobService.
+
+ :param str container_name:
+ Name of container.
+ :param str blob_name:
+ Name of blob.
+ :param str snapshot:
+ The snapshot parameter is an opaque DateTime value that,
+ when present, specifies the blob snapshot to grant permission.
+ :param BlobSasPermissions permission:
+ The permissions associated with the shared access signature. The
+ user is restricted to operations allowed by the permissions.
+ Permissions must be ordered read, write, delete, list.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has been
+ specified in an associated stored access policy.
+ :param expiry:
+ The time at which the shared access signature becomes invalid.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has
+ been specified in an associated stored access policy. Azure will always
+ convert values to UTC. If a date is passed in without timezone info, it
+ is assumed to be UTC.
+ :type expiry: datetime or str
+ :param start:
+ The time at which the shared access signature becomes valid. If
+ omitted, start time for this call is assumed to be the time when the
+ storage service receives the request. Azure will always convert values
+ to UTC. If a date is passed in without timezone info, it is assumed to
+ be UTC.
+ :type start: datetime or str
+ :param str policy_id:
+ A unique value up to 64 characters in length that correlates to a
+ stored access policy. To create a stored access policy, use
+ set_blob_service_properties.
+ :param str ip:
+ Specifies an IP address or a range of IP addresses from which to accept requests.
+ If the IP address from which the request originates does not match the IP address
+ or address range specified on the SAS token, the request is not authenticated.
+ For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+ restricts the request to those IP addresses.
+ :param str protocol:
+ Specifies the protocol permitted for a request made. The default value
+ is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+ :param str cache_control:
+ Response header value for Cache-Control when resource is accessed
+ using this shared access signature.
+ :param str content_disposition:
+ Response header value for Content-Disposition when resource is accessed
+ using this shared access signature.
+ :param str content_encoding:
+ Response header value for Content-Encoding when resource is accessed
+ using this shared access signature.
+ :param str content_language:
+ Response header value for Content-Language when resource is accessed
+ using this shared access signature.
+ :param str content_type:
+ Response header value for Content-Type when resource is accessed
+ using this shared access signature.
+ '''
+ resource_path = container_name + '/' + blob_name
+
+ sas = _BlobSharedAccessHelper()
+ sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
+ sas.add_id(policy_id)
+
+ resource = 'bs' if snapshot else 'b'
+ resource = 'bv' if version_id else resource
+ resource = 'd' if kwargs.pop("is_directory", None) else resource
+ sas.add_resource(resource)
+
+ sas.add_timestamp(snapshot or version_id)
+ sas.add_override_response_headers(cache_control, content_disposition,
+ content_encoding, content_language,
+ content_type)
+ sas.add_info_for_hns_account(**kwargs)
+ sas.add_resource_signature(self.account_name, self.account_key, resource_path,
+ user_delegation_key=self.user_delegation_key)
+
+ return sas.get_token()
+
+ def generate_container(self, container_name, permission=None, expiry=None,
+ start=None, policy_id=None, ip=None, protocol=None,
+ cache_control=None, content_disposition=None,
+ content_encoding=None, content_language=None,
+ content_type=None, **kwargs):
+ '''
+ Generates a shared access signature for the container.
+ Use the returned signature with the sas_token parameter of any BlobService.
+
+ :param str container_name:
+ Name of container.
+ :param ContainerSasPermissions permission:
+ The permissions associated with the shared access signature. The
+ user is restricted to operations allowed by the permissions.
+ Permissions must be ordered read, write, delete, list.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has been
+ specified in an associated stored access policy.
+ :param expiry:
+ The time at which the shared access signature becomes invalid.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has
+ been specified in an associated stored access policy. Azure will always
+ convert values to UTC. If a date is passed in without timezone info, it
+ is assumed to be UTC.
+ :type expiry: datetime or str
+ :param start:
+ The time at which the shared access signature becomes valid. If
+ omitted, start time for this call is assumed to be the time when the
+ storage service receives the request. Azure will always convert values
+ to UTC. If a date is passed in without timezone info, it is assumed to
+ be UTC.
+ :type start: datetime or str
+ :param str policy_id:
+ A unique value up to 64 characters in length that correlates to a
+ stored access policy. To create a stored access policy, use
+ set_blob_service_properties.
+ :param str ip:
+ Specifies an IP address or a range of IP addresses from which to accept requests.
+ If the IP address from which the request originates does not match the IP address
+ or address range specified on the SAS token, the request is not authenticated.
+ For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+ restricts the request to those IP addresses.
+ :param str protocol:
+ Specifies the protocol permitted for a request made. The default value
+ is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+ :param str cache_control:
+ Response header value for Cache-Control when resource is accessed
+ using this shared access signature.
+ :param str content_disposition:
+ Response header value for Content-Disposition when resource is accessed
+ using this shared access signature.
+ :param str content_encoding:
+ Response header value for Content-Encoding when resource is accessed
+ using this shared access signature.
+ :param str content_language:
+ Response header value for Content-Language when resource is accessed
+ using this shared access signature.
+ :param str content_type:
+ Response header value for Content-Type when resource is accessed
+ using this shared access signature.
+ '''
+ sas = _BlobSharedAccessHelper()
+ sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version)
+ sas.add_id(policy_id)
+ sas.add_resource('c')
+ sas.add_override_response_headers(cache_control, content_disposition,
+ content_encoding, content_language,
+ content_type)
+ sas.add_info_for_hns_account(**kwargs)
+ sas.add_resource_signature(self.account_name, self.account_key, container_name,
+ user_delegation_key=self.user_delegation_key)
+ return sas.get_token()
+
+
+class _BlobSharedAccessHelper(_SharedAccessHelper):
+
+ def add_timestamp(self, timestamp):
+ self._add_query(BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp)
+
+ def add_info_for_hns_account(self, **kwargs):
+ self._add_query(QueryStringConstants.SIGNED_DIRECTORY_DEPTH, kwargs.pop('sdd', None))
+ self._add_query(QueryStringConstants.SIGNED_AUTHORIZED_OID, kwargs.pop('preauthorized_agent_object_id', None))
+ self._add_query(QueryStringConstants.SIGNED_UNAUTHORIZED_OID, kwargs.pop('agent_object_id', None))
+ self._add_query(QueryStringConstants.SIGNED_CORRELATION_ID, kwargs.pop('correlation_id', None))
+
+ def get_value_to_append(self, query):
+ return_value = self.query_dict.get(query) or ''
+ return return_value + '\n'
+
+ def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None):
+ # pylint: disable = no-member
+ if path[0] != '/':
+ path = '/' + path
+
+ canonicalized_resource = '/blob/' + account_name + path + '\n'
+
+ # Form the string to sign from shared_access_policy and canonicalized
+ # resource. The order of values is important.
+ string_to_sign = \
+ (self.get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_START) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) +
+ canonicalized_resource)
+
+ if user_delegation_key is not None:
+ self._add_query(QueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid)
+ self._add_query(QueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid)
+ self._add_query(QueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start)
+ self._add_query(QueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry)
+ self._add_query(QueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service)
+ self._add_query(QueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version)
+
+ string_to_sign += \
+ (self.get_value_to_append(QueryStringConstants.SIGNED_OID) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_TID) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_KEY_START) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_KEY_EXPIRY) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_KEY_SERVICE) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_KEY_VERSION) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_AUTHORIZED_OID) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_UNAUTHORIZED_OID) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_CORRELATION_ID))
+ else:
+ string_to_sign += self.get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER)
+
+ string_to_sign += \
+ (self.get_value_to_append(QueryStringConstants.SIGNED_IP) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_VERSION) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_RESOURCE) +
+ self.get_value_to_append(BlobQueryStringConstants.SIGNED_TIMESTAMP) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) +
+ self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE))
+
+ # remove the trailing newline
+ if string_to_sign[-1] == '\n':
+ string_to_sign = string_to_sign[:-1]
+
+ self._add_query(QueryStringConstants.SIGNED_SIGNATURE,
+ sign_string(account_key if user_delegation_key is None else user_delegation_key.value,
+ string_to_sign))
+
+ def get_token(self):
+ # a conscious decision was made to exclude the timestamp in the generated token
+ # this is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp
+ exclude = [BlobQueryStringConstants.SIGNED_TIMESTAMP]
+ return '&'.join(['{0}={1}'.format(n, url_quote(v))
+ for n, v in self.query_dict.items() if v is not None and n not in exclude])
+
+
+def generate_account_sas(
+ account_name, # type: str
+ account_key, # type: str
+ resource_types, # type: Union[ResourceTypes, str]
+ permission, # type: Union[AccountSasPermissions, str]
+ expiry, # type: Optional[Union[datetime, str]]
+ start=None, # type: Optional[Union[datetime, str]]
+ ip=None, # type: Optional[str]
+ **kwargs # type: Any
+ ): # type: (...) -> str
+ """Generates a shared access signature for the blob service.
+
+ Use the returned signature with the credential parameter of any BlobServiceClient,
+ ContainerClient or BlobClient.
+
+ :param str account_name:
+ The storage account name used to generate the shared access signature.
+ :param str account_key:
+ The account key, also called shared key or access key, to generate the shared access signature.
+ :param resource_types:
+ Specifies the resource types that are accessible with the account SAS.
+ :type resource_types: str or ~.ResourceTypes
+ :param permission:
+ The permissions associated with the shared access signature. The
+ user is restricted to operations allowed by the permissions.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has been
+ specified in an associated stored access policy.
+ :type permission: str or ~.AccountSasPermissions
+ :param expiry:
+ The time at which the shared access signature becomes invalid.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has
+ been specified in an associated stored access policy. Azure will always
+ convert values to UTC. If a date is passed in without timezone info, it
+ is assumed to be UTC.
+ :type expiry: ~datetime.datetime or str
+ :param start:
+ The time at which the shared access signature becomes valid. If
+ omitted, start time for this call is assumed to be the time when the
+ storage service receives the request. Azure will always convert values
+ to UTC. If a date is passed in without timezone info, it is assumed to
+ be UTC.
+ :type start: ~datetime.datetime or str
+ :param str ip:
+ Specifies an IP address or a range of IP addresses from which to accept requests.
+ If the IP address from which the request originates does not match the IP address
+ or address range specified on the SAS token, the request is not authenticated.
+ For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+ restricts the request to those IP addresses.
+ :keyword str protocol:
+ Specifies the protocol permitted for a request made. The default value is https.
+ :return: A Shared Access Signature (sas) token.
+ :rtype: str
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_authentication.py
+ :start-after: [START create_sas_token]
+ :end-before: [END create_sas_token]
+ :language: python
+ :dedent: 8
+ :caption: Generating a shared access signature.
+ """
+ sas = SharedAccessSignature(account_name, account_key)
+ return sas.generate_account(
+ services=Services(blob=True),
+ resource_types=resource_types,
+ permission=permission,
+ expiry=expiry,
+ start=start,
+ ip=ip,
+ **kwargs
+ ) # type: ignore
+
+
+def generate_container_sas(
+ account_name, # type: str
+ container_name, # type: str
+ account_key=None, # type: Optional[str]
+ user_delegation_key=None, # type: Optional[UserDelegationKey]
+ permission=None, # type: Optional[Union[ContainerSasPermissions, str]]
+ expiry=None, # type: Optional[Union[datetime, str]]
+ start=None, # type: Optional[Union[datetime, str]]
+ policy_id=None, # type: Optional[str]
+ ip=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> Any
+ """Generates a shared access signature for a container.
+
+ Use the returned signature with the credential parameter of any BlobServiceClient,
+ ContainerClient or BlobClient.
+
+ :param str account_name:
+ The storage account name used to generate the shared access signature.
+ :param str container_name:
+ The name of the container.
+ :param str account_key:
+ The account key, also called shared key or access key, to generate the shared access signature.
+ Either `account_key` or `user_delegation_key` must be specified.
+ :param ~.UserDelegationKey user_delegation_key:
+ Instead of an account shared key, the user could pass in a user delegation key.
+ A user delegation key can be obtained from the service by authenticating with an AAD identity;
+ this can be accomplished by calling :func:`~.BlobServiceClient.get_user_delegation_key`.
+ When present, the SAS is signed with the user delegation key instead.
+ :param permission:
+ The permissions associated with the shared access signature. The
+ user is restricted to operations allowed by the permissions.
+ Permissions must be ordered read, write, delete, list.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has been
+ specified in an associated stored access policy.
+ :type permission: str or ~.ContainerSasPermissions
+ :param expiry:
+ The time at which the shared access signature becomes invalid.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has
+ been specified in an associated stored access policy. Azure will always
+ convert values to UTC. If a date is passed in without timezone info, it
+ is assumed to be UTC.
+ :type expiry: ~datetime.datetime or str
+ :param start:
+ The time at which the shared access signature becomes valid. If
+ omitted, start time for this call is assumed to be the time when the
+ storage service receives the request. Azure will always convert values
+ to UTC. If a date is passed in without timezone info, it is assumed to
+ be UTC.
+ :type start: ~datetime.datetime or str
+ :param str policy_id:
+ A unique value up to 64 characters in length that correlates to a
+ stored access policy. To create a stored access policy, use
+ :func:`~.ContainerClient.set_container_access_policy`.
+ :param str ip:
+ Specifies an IP address or a range of IP addresses from which to accept requests.
+ If the IP address from which the request originates does not match the IP address
+ or address range specified on the SAS token, the request is not authenticated.
+ For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+ restricts the request to those IP addresses.
+ :keyword str protocol:
+ Specifies the protocol permitted for a request made. The default value is https.
+ :keyword str cache_control:
+ Response header value for Cache-Control when resource is accessed
+ using this shared access signature.
+ :keyword str content_disposition:
+ Response header value for Content-Disposition when resource is accessed
+ using this shared access signature.
+ :keyword str content_encoding:
+ Response header value for Content-Encoding when resource is accessed
+ using this shared access signature.
+ :keyword str content_language:
+ Response header value for Content-Language when resource is accessed
+ using this shared access signature.
+ :keyword str content_type:
+ Response header value for Content-Type when resource is accessed
+ using this shared access signature.
+ :return: A Shared Access Signature (sas) token.
+ :rtype: str
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers.py
+ :start-after: [START generate_sas_token]
+ :end-before: [END generate_sas_token]
+ :language: python
+ :dedent: 12
+ :caption: Generating a sas token.
+ """
+ if not user_delegation_key and not account_key:
+ raise ValueError("Either user_delegation_key or account_key must be provided.")
+
+ if user_delegation_key:
+ sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key)
+ else:
+ sas = BlobSharedAccessSignature(account_name, account_key=account_key)
+ return sas.generate_container(
+ container_name,
+ permission=permission,
+ expiry=expiry,
+ start=start,
+ policy_id=policy_id,
+ ip=ip,
+ **kwargs
+ )
+
+
+def generate_blob_sas(
+ account_name, # type: str
+ container_name, # type: str
+ blob_name, # type: str
+ snapshot=None, # type: Optional[str]
+ account_key=None, # type: Optional[str]
+ user_delegation_key=None, # type: Optional[UserDelegationKey]
+ permission=None, # type: Optional[Union[BlobSasPermissions, str]]
+ expiry=None, # type: Optional[Union[datetime, str]]
+ start=None, # type: Optional[Union[datetime, str]]
+ policy_id=None, # type: Optional[str]
+ ip=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> Any
+ """Generates a shared access signature for a blob.
+
+ Use the returned signature with the credential parameter of any BlobServiceClient,
+ ContainerClient or BlobClient.
+
+ :param str account_name:
+ The storage account name used to generate the shared access signature.
+ :param str container_name:
+ The name of the container.
+ :param str blob_name:
+ The name of the blob.
+ :param str snapshot:
+ An optional blob snapshot ID.
+ :param str account_key:
+ The account key, also called shared key or access key, to generate the shared access signature.
+ Either `account_key` or `user_delegation_key` must be specified.
+ :param ~.UserDelegationKey user_delegation_key:
+ Instead of an account shared key, the user could pass in a user delegation key.
+ A user delegation key can be obtained from the service by authenticating with an AAD identity;
+ this can be accomplished by calling :func:`~.BlobServiceClient.get_user_delegation_key`.
+ When present, the SAS is signed with the user delegation key instead.
+ :param permission:
+ The permissions associated with the shared access signature. The
+ user is restricted to operations allowed by the permissions.
+ Permissions must be ordered read, write, delete, list.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has been
+ specified in an associated stored access policy.
+ :type permission: str or ~.BlobSasPermissions
+ :param expiry:
+ The time at which the shared access signature becomes invalid.
+ Required unless an id is given referencing a stored access policy
+ which contains this field. This field must be omitted if it has
+ been specified in an associated stored access policy. Azure will always
+ convert values to UTC. If a date is passed in without timezone info, it
+ is assumed to be UTC.
+ :type expiry: ~datetime.datetime or str
+ :param start:
+ The time at which the shared access signature becomes valid. If
+ omitted, start time for this call is assumed to be the time when the
+ storage service receives the request. Azure will always convert values
+ to UTC. If a date is passed in without timezone info, it is assumed to
+ be UTC.
+ :type start: ~datetime.datetime or str
+ :param str policy_id:
+ A unique value up to 64 characters in length that correlates to a
+ stored access policy. To create a stored access policy, use
+ :func:`~.ContainerClient.set_container_access_policy()`.
+ :param str ip:
+ Specifies an IP address or a range of IP addresses from which to accept requests.
+ If the IP address from which the request originates does not match the IP address
+ or address range specified on the SAS token, the request is not authenticated.
+ For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+ restricts the request to those IP addresses.
+ :keyword str version_id:
+        An optional blob version ID. This parameter only applies to a versioning-enabled account.
+
+ .. versionadded:: 12.4.0
+ This keyword argument was introduced in API version '2019-12-12'.
+ :keyword str protocol:
+ Specifies the protocol permitted for a request made. The default value is https.
+ :keyword str cache_control:
+ Response header value for Cache-Control when resource is accessed
+ using this shared access signature.
+ :keyword str content_disposition:
+ Response header value for Content-Disposition when resource is accessed
+ using this shared access signature.
+ :keyword str content_encoding:
+ Response header value for Content-Encoding when resource is accessed
+ using this shared access signature.
+ :keyword str content_language:
+ Response header value for Content-Language when resource is accessed
+ using this shared access signature.
+ :keyword str content_type:
+ Response header value for Content-Type when resource is accessed
+ using this shared access signature.
+ :return: A Shared Access Signature (sas) token.
+ :rtype: str
+ """
+ if not user_delegation_key and not account_key:
+ raise ValueError("Either user_delegation_key or account_key must be provided.")
+ version_id = kwargs.pop('version_id', None)
+ if version_id and snapshot:
+ raise ValueError("snapshot and version_id cannot be set at the same time.")
+ if user_delegation_key:
+ sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key)
+ else:
+ sas = BlobSharedAccessSignature(account_name, account_key=account_key)
+ return sas.generate_blob(
+ container_name,
+ blob_name,
+ snapshot=snapshot,
+ version_id=version_id,
+ permission=permission,
+ expiry=expiry,
+ start=start,
+ policy_id=policy_id,
+ ip=ip,
+ **kwargs
+ )
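
The module-level `generate_account_sas`, `generate_container_sas` and `generate_blob_sas` helpers above are the public entry points; their docstrings spell out the parameters. A hedged usage sketch for `generate_blob_sas`, assuming an account-key credential, placeholder resource names, and the standard public-cloud blob endpoint format:

```
from datetime import datetime, timedelta

# Placeholder account, container, blob names and key; permission is passed as
# a string, which the docstring above allows as an alternative to BlobSasPermissions.
sas_token = generate_blob_sas(
    account_name="mystorageaccount",
    container_name="mycontainer",
    blob_name="myblob.txt",
    account_key="<base64-account-key>",
    permission="r",
    expiry=datetime.utcnow() + timedelta(hours=1),
)

# Assumed standard endpoint format for the public cloud.
blob_url = "https://{}.blob.core.windows.net/{}/{}?{}".format(
    "mystorageaccount", "mycontainer", "myblob.txt", sas_token)
```
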
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_upload_helpers.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_upload_helpers.py
new file mode 100644
index 00000000000..e8b02936696
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_upload_helpers.py
@@ -0,0 +1,296 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+from io import SEEK_SET, UnsupportedOperation
+from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import
+
+import six
+from azure.core.exceptions import ResourceExistsError, ResourceModifiedError
+
+from ._shared.response_handlers import (
+ process_storage_error,
+ return_response_headers)
+from ._shared.models import StorageErrorCode
+from ._shared.uploads import (
+ upload_data_chunks,
+ upload_substream_blocks,
+ BlockBlobChunkUploader,
+ PageBlobChunkUploader,
+ AppendBlobChunkUploader)
+from ._shared.encryption import generate_blob_encryption_data, encrypt_blob
+from ._generated.models import (
+ StorageErrorException,
+ BlockLookupList,
+ AppendPositionAccessConditions,
+ ModifiedAccessConditions,
+)
+
+if TYPE_CHECKING:
+ from datetime import datetime # pylint: disable=unused-import
+ BlobLeaseClient = TypeVar("BlobLeaseClient")
+
+_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
+_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
+
+
+def _convert_mod_error(error):
+ message = error.message.replace(
+ "The condition specified using HTTP conditional header(s) is not met.",
+ "The specified blob already exists.")
+ message = message.replace("ConditionNotMet", "BlobAlreadyExists")
+ overwrite_error = ResourceExistsError(
+ message=message,
+ response=error.response,
+ error=error)
+ overwrite_error.error_code = StorageErrorCode.blob_already_exists
+ raise overwrite_error
+
+
+def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument
+ return any([
+ modified_access_conditions.if_modified_since,
+ modified_access_conditions.if_unmodified_since,
+ modified_access_conditions.if_none_match,
+ modified_access_conditions.if_match
+ ])
+
+
+def upload_block_blob( # pylint: disable=too-many-locals
+ client=None,
+ data=None,
+ stream=None,
+ length=None,
+ overwrite=None,
+ headers=None,
+ validate_content=None,
+ max_concurrency=None,
+ blob_settings=None,
+ encryption_options=None,
+ **kwargs):
+ try:
+ if not overwrite and not _any_conditions(**kwargs):
+ kwargs['modified_access_conditions'].if_none_match = '*'
+ adjusted_count = length
+ if (encryption_options.get('key') is not None) and (adjusted_count is not None):
+ adjusted_count += (16 - (length % 16))
+ blob_headers = kwargs.pop('blob_headers', None)
+ tier = kwargs.pop('standard_blob_tier', None)
+ blob_tags_string = kwargs.pop('blob_tags_string', None)
+
+ # Do single put if the size is smaller than or equal to config.max_single_put_size
+ if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size):
+ try:
+ data = data.read(length)
+ if not isinstance(data, six.binary_type):
+ raise TypeError('Blob data should be of type bytes.')
+ except AttributeError:
+ pass
+ if encryption_options.get('key'):
+ encryption_data, data = encrypt_blob(data, encryption_options['key'])
+ headers['x-ms-meta-encryptiondata'] = encryption_data
+ return client.upload(
+ data,
+ content_length=adjusted_count,
+ blob_http_headers=blob_headers,
+ headers=headers,
+ cls=return_response_headers,
+ validate_content=validate_content,
+ data_stream_total=adjusted_count,
+ upload_stream_current=0,
+ tier=tier.value if tier else None,
+ blob_tags_string=blob_tags_string,
+ **kwargs)
+
+ use_original_upload_path = blob_settings.use_byte_buffer or \
+ validate_content or encryption_options.get('required') or \
+ blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \
+ hasattr(stream, 'seekable') and not stream.seekable() or \
+ not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
+
+ if use_original_upload_path:
+ if encryption_options.get('key'):
+ cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key'])
+ headers['x-ms-meta-encryptiondata'] = encryption_data
+ encryption_options['cek'] = cek
+ encryption_options['vector'] = iv
+ block_ids = upload_data_chunks(
+ service=client,
+ uploader_class=BlockBlobChunkUploader,
+ total_size=length,
+ chunk_size=blob_settings.max_block_size,
+ max_concurrency=max_concurrency,
+ stream=stream,
+ validate_content=validate_content,
+ encryption_options=encryption_options,
+ headers=headers,
+ **kwargs
+ )
+ else:
+ block_ids = upload_substream_blocks(
+ service=client,
+ uploader_class=BlockBlobChunkUploader,
+ total_size=length,
+ chunk_size=blob_settings.max_block_size,
+ max_concurrency=max_concurrency,
+ stream=stream,
+ validate_content=validate_content,
+ headers=headers,
+ **kwargs
+ )
+
+ block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[])
+ block_lookup.latest = block_ids
+ return client.commit_block_list(
+ block_lookup,
+ blob_http_headers=blob_headers,
+ cls=return_response_headers,
+ validate_content=validate_content,
+ headers=headers,
+ tier=tier.value if tier else None,
+ blob_tags_string=blob_tags_string,
+ **kwargs)
+ except StorageErrorException as error:
+ try:
+ process_storage_error(error)
+ except ResourceModifiedError as mod_error:
+ if not overwrite:
+ _convert_mod_error(mod_error)
+ raise
+
+
+def upload_page_blob(
+ client=None,
+ stream=None,
+ length=None,
+ overwrite=None,
+ headers=None,
+ validate_content=None,
+ max_concurrency=None,
+ blob_settings=None,
+ encryption_options=None,
+ **kwargs):
+ try:
+ if not overwrite and not _any_conditions(**kwargs):
+ kwargs['modified_access_conditions'].if_none_match = '*'
+ if length is None or length < 0:
+ raise ValueError("A content length must be specified for a Page Blob.")
+ if length % 512 != 0:
+ raise ValueError("Invalid page blob size: {0}. "
+ "The size must be aligned to a 512-byte boundary.".format(length))
+ if kwargs.get('premium_page_blob_tier'):
+ premium_page_blob_tier = kwargs.pop('premium_page_blob_tier')
+ try:
+ headers['x-ms-access-tier'] = premium_page_blob_tier.value
+ except AttributeError:
+ headers['x-ms-access-tier'] = premium_page_blob_tier
+ if encryption_options and encryption_options.get('data'):
+ headers['x-ms-meta-encryptiondata'] = encryption_options['data']
+ blob_tags_string = kwargs.pop('blob_tags_string', None)
+
+ response = client.create(
+ content_length=0,
+ blob_content_length=length,
+ blob_sequence_number=None,
+ blob_http_headers=kwargs.pop('blob_headers', None),
+ blob_tags_string=blob_tags_string,
+ cls=return_response_headers,
+ headers=headers,
+ **kwargs)
+ if length == 0:
+ return response
+
+ kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag'])
+ return upload_data_chunks(
+ service=client,
+ uploader_class=PageBlobChunkUploader,
+ total_size=length,
+ chunk_size=blob_settings.max_page_size,
+ stream=stream,
+ max_concurrency=max_concurrency,
+ validate_content=validate_content,
+ encryption_options=encryption_options,
+ headers=headers,
+ **kwargs)
+
+ except StorageErrorException as error:
+ try:
+ process_storage_error(error)
+ except ResourceModifiedError as mod_error:
+ if not overwrite:
+ _convert_mod_error(mod_error)
+ raise
+
+
+def upload_append_blob( # pylint: disable=unused-argument
+ client=None,
+ stream=None,
+ length=None,
+ overwrite=None,
+ headers=None,
+ validate_content=None,
+ max_concurrency=None,
+ blob_settings=None,
+ encryption_options=None,
+ **kwargs):
+ try:
+ if length == 0:
+ return {}
+ blob_headers = kwargs.pop('blob_headers', None)
+ append_conditions = AppendPositionAccessConditions(
+ max_size=kwargs.pop('maxsize_condition', None),
+ append_position=None)
+ blob_tags_string = kwargs.pop('blob_tags_string', None)
+
+ try:
+ if overwrite:
+ client.create(
+ content_length=0,
+ blob_http_headers=blob_headers,
+ headers=headers,
+ blob_tags_string=blob_tags_string,
+ **kwargs)
+ return upload_data_chunks(
+ service=client,
+ uploader_class=AppendBlobChunkUploader,
+ total_size=length,
+ chunk_size=blob_settings.max_block_size,
+ stream=stream,
+ max_concurrency=max_concurrency,
+ validate_content=validate_content,
+ append_position_access_conditions=append_conditions,
+ headers=headers,
+ **kwargs)
+ except StorageErrorException as error:
+ if error.response.status_code != 404:
+ raise
+ # rewind the request body if it is a stream
+ if hasattr(stream, 'read'):
+ try:
+ # attempt to rewind the body to the initial position
+ stream.seek(0, SEEK_SET)
+ except UnsupportedOperation:
+ # if body is not seekable, then retry would not work
+ raise error
+ client.create(
+ content_length=0,
+ blob_http_headers=blob_headers,
+ headers=headers,
+ blob_tags_string=blob_tags_string,
+ **kwargs)
+ return upload_data_chunks(
+ service=client,
+ uploader_class=AppendBlobChunkUploader,
+ total_size=length,
+ chunk_size=blob_settings.max_block_size,
+ stream=stream,
+ max_concurrency=max_concurrency,
+ validate_content=validate_content,
+ append_position_access_conditions=append_conditions,
+ headers=headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
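+
+
+# Editorial note (hedged): these helpers are internal and are not called directly by users.
+# BlobClient.upload_blob builds an options dict via _upload_blob_options and dispatches on
+# blob_type, roughly as follows (the async client in this PR does the same with await):
+#
+# if blob_type == BlobType.BlockBlob:
+# return upload_block_blob(**options)
+# if blob_type == BlobType.PageBlob:
+# return upload_page_blob(**options)
+# return upload_append_blob(**options)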
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_version.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_version.py
new file mode 100644
index 00000000000..c9d0e600320
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/_version.py
@@ -0,0 +1,7 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+VERSION = "12.6.0"
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/__init__.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/__init__.py
new file mode 100644
index 00000000000..247f39e1ffd
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/__init__.py
@@ -0,0 +1,137 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import os
+
+from .._models import BlobType
+from .._shared.policies_async import ExponentialRetry, LinearRetry
+from ._blob_client_async import BlobClient
+from ._container_client_async import ContainerClient
+from ._blob_service_client_async import BlobServiceClient
+from ._lease_async import BlobLeaseClient
+from ._download_async import StorageStreamDownloader
+
+
+async def upload_blob_to_url(
+ blob_url, # type: str
+ data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
+ credential=None, # type: Any
+ **kwargs):
+ # type: (...) -> dict[str, Any]
+ """Upload data to a given URL
+
+ The data will be uploaded as a block blob.
+
+ :param str blob_url:
+ The full URI to the blob. This can also include a SAS token.
+ :param data:
+ The data to upload. This can be bytes, text, an iterable or a file-like object.
+ :type data: bytes or str or Iterable
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ blob URL already has a SAS token. The value can be a SAS token string, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the URL already has a SAS token, specifying an explicit credential will take priority.
+ :keyword bool overwrite:
+ Whether the blob to be uploaded should overwrite the current data.
+ If True, upload_blob_to_url will overwrite any existing data. If set to False, the
+ operation will fail with a ResourceExistsError.
+ :keyword int max_concurrency:
+ The number of parallel connections with which to upload.
+ :keyword int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :keyword dict(str,str) metadata:
+ Name-value pairs associated with the blob as metadata.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the blob. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https, as https (the default) will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient upload algorithm
+ will not be used, because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword str encoding:
+ Encoding to use if text is supplied as input. Defaults to UTF-8.
+ :returns: Blob-updated property dict (Etag and last modified)
+ :rtype: dict(str, Any)
+ """
+ async with BlobClient.from_blob_url(blob_url, credential=credential) as client:
+ return await client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs)
+
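+# Editorial usage sketch (hedged, not part of the upstream SDK): uploading a small payload with a
+# SAS URL. The URL and data below are placeholders; run the coroutine with asyncio.run(...).
+async def _example_upload_blob_to_url(): # pragma: no cover - illustrative only, never called
+ sas_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob.txt?<sas-token>" # placeholder
+ return await upload_blob_to_url(sas_url, b"hello world", overwrite=True)
+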
+
+async def _download_to_stream(client, handle, **kwargs):
+ """Download data to specified open file-handle."""
+ stream = await client.download_blob(**kwargs)
+ await stream.readinto(handle)
+
+
+async def download_blob_from_url(
+ blob_url, # type: str
+ output, # type: str
+ credential=None, # type: Any
+ **kwargs):
+ # type: (...) -> None
+ """Download the contents of a blob to a local file or stream.
+
+ :param str blob_url:
+ The full URI to the blob. This can also include a SAS token.
+ :param output:
+ Where the data should be downloaded to. This could be either a file path to write to,
+ or an open IO handle to write to.
+ :type output: str or writable stream
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ blob URL already has a SAS token or the blob is public. The value can be a SAS token string,
+ an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the URL already has a SAS token, specifying an explicit credential will take priority.
+ :keyword bool overwrite:
+ Whether the local file should be overwritten if it already exists. The default value is
+ `False` - in which case a ValueError will be raised if the file already exists. If set to
+ `True`, an attempt will be made to write to the existing file. If a stream handle is passed
+ in, this value is ignored.
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :keyword int offset:
+ Start of byte range to use for downloading a section of the blob.
+ Must be set if length is provided.
+ :keyword int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the blob. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https, as https (the default) will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient upload algorithm
+ will not be used, because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :rtype: None
+ """
+ overwrite = kwargs.pop('overwrite', False)
+ async with BlobClient.from_blob_url(blob_url, credential=credential) as client:
+ if hasattr(output, 'write'):
+ await _download_to_stream(client, output, **kwargs)
+ else:
+ if not overwrite and os.path.isfile(output):
+ raise ValueError("The file '{}' already exists.".format(output))
+ with open(output, 'wb') as file_handle:
+ await _download_to_stream(client, file_handle, **kwargs)
+
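+# Editorial usage sketch (hedged, not part of the upstream SDK): downloading a blob to a local
+# file. The URL and output path are placeholders; overwrite=True replaces an existing local file.
+async def _example_download_blob_from_url(): # pragma: no cover - illustrative only, never called
+ sas_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob.txt?<sas-token>" # placeholder
+ await download_blob_from_url(sas_url, "./myblob.txt", overwrite=True)
+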
+
+__all__ = [
+ 'upload_blob_to_url',
+ 'download_blob_from_url',
+ 'BlobServiceClient',
+ 'ContainerClient',
+ 'BlobClient',
+ 'BlobLeaseClient',
+ 'ExponentialRetry',
+ 'LinearRetry',
+ 'StorageStreamDownloader'
+]
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_blob_client_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_blob_client_async.py
new file mode 100644
index 00000000000..6b08edcd2fe
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_blob_client_async.py
@@ -0,0 +1,2333 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines, invalid-overridden-method
+
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple,
+ TYPE_CHECKING
+)
+
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.exceptions import ResourceNotFoundError
+
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin
+from .._shared.policies_async import ExponentialRetry
+from .._shared.response_handlers import return_response_headers, process_storage_error
+from .._deserialize import get_page_ranges_result, parse_tags
+from .._serialize import get_modify_conditions, get_api_version, get_access_conditions
+from .._generated import VERSION
+from .._generated.aio import AzureBlobStorage
+from .._generated.models import StorageErrorException, CpkInfo
+from .._deserialize import deserialize_blob_properties
+from .._blob_client import BlobClient as BlobClientBase
+from ._upload_helpers import (
+ upload_block_blob,
+ upload_append_blob,
+ upload_page_blob)
+from .._models import BlobType, BlobBlock, BlobProperties
+from ._lease_async import BlobLeaseClient
+from ._download_async import StorageStreamDownloader
+
+
+if TYPE_CHECKING:
+ from datetime import datetime
+ from .._models import ( # pylint: disable=unused-import
+ ContentSettings,
+ PremiumPageBlobTier,
+ StandardBlobTier,
+ SequenceNumberAction
+ )
+
+
+class BlobClient(AsyncStorageAccountHostsMixin, BlobClientBase): # pylint: disable=too-many-public-methods
+ """A client to interact with a specific blob, although that blob may not yet exist.
+
+ :param str account_url:
+ The URI to the storage account. In order to create a client given the full URI to the blob,
+ use the :func:`from_blob_url` classmethod.
+ :param container_name: The container name for the blob.
+ :type container_name: str
+ :param blob_name: The name of the blob with which to interact. If specified, this value will override
+ a blob value specified in the blob URL.
+ :type blob_name: str
+ :param str snapshot:
+ The optional blob snapshot on which to operate. This can be the snapshot ID string
+ or the response returned from :func:`create_snapshot`.
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the URL already has a SAS token, specifying an explicit credential will take priority.
+ :keyword str api_version:
+ The Storage API version to use for requests. Default value is '2019-07-07'.
+ Setting to an older version may result in reduced feature compatibility.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str secondary_hostname:
+ The hostname of the secondary endpoint.
+ :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+ Defaults to 4*1024*1024, or 4MB.
+ :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+ uploaded with only one HTTP PUT request. If the blob size is larger than max_single_put_size,
+ the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+ :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+ algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+ :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+ :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+ :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
+ if the blob exceeds this size, the remainder will be downloaded in chunks (potentially in parallel).
+ Defaults to 32*1024*1024, or 32MB.
+ :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+ or 4MB.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_authentication_async.py
+ :start-after: [START create_blob_client]
+ :end-before: [END create_blob_client]
+ :language: python
+ :dedent: 8
+ :caption: Creating the BlobClient from a URL to a public blob (no auth needed).
+
+ .. literalinclude:: ../samples/blob_samples_authentication_async.py
+ :start-after: [START create_blob_client_sas_url]
+ :end-before: [END create_blob_client_sas_url]
+ :language: python
+ :dedent: 8
+ :caption: Creating the BlobClient from a SAS URL to a blob.
+ """
+ def __init__(
+ self, account_url, # type: str
+ container_name, # type: str
+ blob_name, # type: str
+ snapshot=None, # type: Optional[Union[str, Dict[str, Any]]]
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+ super(BlobClient, self).__init__(
+ account_url,
+ container_name=container_name,
+ blob_name=blob_name,
+ snapshot=snapshot,
+ credential=credential,
+ **kwargs)
+ self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline)
+ self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access
+ self._loop = kwargs.get('loop', None)
+
+ @distributed_trace_async
+ async def get_account_information(self, **kwargs): # type: ignore
+ # type: (Optional[int]) -> Dict[str, str]
+ """Gets information related to the storage account in which the blob resides.
+
+ The information can also be retrieved if the user has a SAS to a container or blob.
+ The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+ :returns: A dict of account information (SKU and account type).
+ :rtype: dict(str, str)
+ """
+ try:
+ return await self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def upload_blob(
+ self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
+ blob_type=BlobType.BlockBlob, # type: Union[str, BlobType]
+ length=None, # type: Optional[int]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs
+ ):
+ # type: (...) -> Any
+ """Creates a new blob from a data source with automatic chunking.
+
+ :param data: The blob data to upload.
+ :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
+ either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
+ :param int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict(str, str)
+ :keyword tags:
+ Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+ If True, upload_blob will overwrite the existing data. If set to False, the
+ operation will fail with ResourceExistsError. The exception to the above is with Append
+ blob types: if set to False and the data already exists, an error will not be raised
+ and the data will be appended to the existing blob. If overwrite=True is set, the existing
+ append blob will be deleted and a new one created. Defaults to False.
+ :keyword ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the blob. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https, as https (the default), will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient upload algorithm
+ will not be used because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword lease:
+ If specified, upload_blob only succeeds if the
+ blob's lease is active and matches this ID.
+ Required if the blob has an active lease.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+ :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+ A standard blob tier value to set the blob to. For this version of the library,
+ this is only applicable to block blobs on standard storage accounts.
+ :keyword int maxsize_condition:
+ Optional conditional header. The max length in bytes permitted for
+ the append blob. If the Append Block operation would cause the blob
+ to exceed that limit or if the blob size is already greater than the
+ value specified in this header, the request will fail with
+ MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+ :keyword int max_concurrency:
+ Maximum number of parallel connections to use when the blob size exceeds
+ 64MB.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str encoding:
+ Defaults to UTF-8.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :returns: Blob-updated property dict (Etag and last modified)
+ :rtype: dict[str, Any]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_hello_world_async.py
+ :start-after: [START upload_a_blob]
+ :end-before: [END upload_a_blob]
+ :language: python
+ :dedent: 16
+ :caption: Upload a blob to the container.
+ """
+ options = self._upload_blob_options(
+ data,
+ blob_type=blob_type,
+ length=length,
+ metadata=metadata,
+ **kwargs)
+ if blob_type == BlobType.BlockBlob:
+ return await upload_block_blob(**options)
+ if blob_type == BlobType.PageBlob:
+ return await upload_page_blob(**options)
+ return await upload_append_blob(**options)
+
+ @distributed_trace_async
+ async def download_blob(self, offset=None, length=None, **kwargs):
+ # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
+ """Downloads a blob to the StorageStreamDownloader. The readall() method must
+ be used to read all the content or readinto() must be used to download the blob into
+ a stream.
+
+ :param int offset:
+ Start of byte range to use for downloading a section of the blob.
+ Must be set if length is provided.
+ :param int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to download.
+
+ .. versionadded:: 12.4.0
+ This keyword argument was introduced in API version '2019-12-12'.
+
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the blob. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https, as https (the default), will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient upload algorithm
+ will not be used because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword lease:
+ Required if the blob has an active lease. If specified, download_blob only
+ succeeds if the blob's lease is active and matches this ID. Value can be a
+ BlobLeaseClient object or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :keyword str encoding:
+ Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :returns: A streaming object (StorageStreamDownloader)
+ :rtype: ~azure.storage.blob.aio.StorageStreamDownloader
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_hello_world_async.py
+ :start-after: [START download_a_blob]
+ :end-before: [END download_a_blob]
+ :language: python
+ :dedent: 16
+ :caption: Download a blob.
+ """
+ options = self._download_blob_options(
+ offset=offset,
+ length=length,
+ **kwargs)
+ downloader = StorageStreamDownloader(**options)
+ await downloader._setup() # pylint: disable=protected-access
+ return downloader
+
+ @distributed_trace_async
+ async def delete_blob(self, delete_snapshots=False, **kwargs):
+ # type: (str, Any) -> None
+ """Marks the specified blob for deletion.
+
+ The blob is later deleted during garbage collection.
+ Note that in order to delete a blob, you must delete all of its
+ snapshots. You can delete both at the same time with the delete_blob()
+ operation.
+
+ If a delete retention policy is enabled for the service, then this operation soft deletes the blob
+ and retains the blob for a specified number of days.
+ After the specified number of days, the blob's data is removed from the service during garbage collection.
+ A soft-deleted blob is accessible through :func:`~ContainerClient.list_blobs()` by specifying the
+ `include=['deleted']` option. A soft-deleted blob can be restored using the :func:`undelete` operation.
+
+ :param str delete_snapshots:
+ Required if the blob has associated snapshots. Values include:
+ - "only": Deletes only the blobs snapshots.
+ - "include": Deletes the blob along with all snapshots.
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to delete.
+
+ .. versionadded:: 12.4.0
+ This keyword argument was introduced in API version '2019-12-12'.
+
+ :keyword lease:
+ Required if the blob has an active lease. If specified, delete_blob only
+ succeeds if the blob's lease is active and matches this ID. Value can be a
+ BlobLeaseClient object or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_hello_world_async.py
+ :start-after: [START delete_blob]
+ :end-before: [END delete_blob]
+ :language: python
+ :dedent: 16
+ :caption: Delete a blob.
+ """
+ options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs)
+ try:
+ await self._client.blob.delete(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def undelete_blob(self, **kwargs):
+ # type: (Any) -> None
+ """Restores soft-deleted blobs or snapshots.
+
+ Operation will only be successful if used within the specified number of days
+ set in the delete retention policy.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common_async.py
+ :start-after: [START undelete_blob]
+ :end-before: [END undelete_blob]
+ :language: python
+ :dedent: 12
+ :caption: Undeleting a blob.
+ """
+ try:
+ await self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def exists(self, **kwargs):
+ # type: (**Any) -> bool
+ """
+ Returns True if a blob exists with the defined parameters, and returns
+ False otherwise.
+
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to check if it exists.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: True if the blob exists, False otherwise.
+ """
+ try:
+ await self._client.blob.get_properties(
+ snapshot=self.snapshot,
+ **kwargs)
+ return True
+ except StorageErrorException as error:
+ try:
+ process_storage_error(error)
+ except ResourceNotFoundError:
+ return False
+
+ @distributed_trace_async
+ async def get_blob_properties(self, **kwargs):
+ # type: (Any) -> BlobProperties
+ """Returns all user-defined metadata, standard HTTP properties, and
+ system properties for the blob. It does not return the content of the blob.
+
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to get properties for.
+
+ .. versionadded:: 12.4.0
+ This keyword argument was introduced in API version '2019-12-12'.
+
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: BlobProperties
+ :rtype: ~azure.storage.blob.BlobProperties
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common_async.py
+ :start-after: [START get_blob_properties]
+ :end-before: [END get_blob_properties]
+ :language: python
+ :dedent: 12
+ :caption: Getting the properties for a blob.
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+ try:
+ blob_props = await self._client.blob.get_properties(
+ timeout=kwargs.pop('timeout', None),
+ version_id=kwargs.pop('version_id', None),
+ snapshot=self.snapshot,
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ cls=kwargs.pop('cls', None) or deserialize_blob_properties,
+ cpk_info=cpk_info,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ blob_props.name = self.blob_name
+ if isinstance(blob_props, BlobProperties):
+ blob_props.container = self.container_name
+ blob_props.snapshot = self.snapshot
+ return blob_props # type: ignore
+
+ @distributed_trace_async
+ async def set_http_headers(self, content_settings=None, **kwargs):
+ # type: (Optional[ContentSettings], Any) -> None
+ """Sets system properties on the blob.
+
+ If one property is set for the content_settings, all properties will be overridden.
+
+ :param ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified)
+ :rtype: Dict[str, Any]
+ """
+ options = self._set_http_headers_options(content_settings=content_settings, **kwargs)
+ try:
+ return await self._client.blob.set_http_headers(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def set_blob_metadata(self, metadata=None, **kwargs):
+ # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]]
+ """Sets user-defined metadata for the blob as one or more name-value pairs.
+
+ :param metadata:
+ Dict containing name and value pairs. Each call to this operation
+ replaces all existing metadata attached to the blob. To remove all
+ metadata from the blob, call this operation with no metadata headers.
+ :type metadata: dict(str, str)
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified)
+ """
+ options = self._set_blob_metadata_options(metadata=metadata, **kwargs)
+ try:
+ return await self._client.blob.set_metadata(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def create_page_blob( # type: ignore
+ self, size, # type: int
+ content_settings=None, # type: Optional[ContentSettings]
+ metadata=None, # type: Optional[Dict[str, str]]
+ premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Creates a new Page Blob of the specified size.
+
+ :param int size:
+ This specifies the maximum size for the page blob, up to 1 TB.
+ The page blob size must be aligned to a 512-byte boundary.
+ :param ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict(str, str)
+ :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+ :keyword tags:
+ Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword int sequence_number:
+ Only for Page blobs. The sequence number is a user-controlled value that you can use to
+ track requests. The value of the sequence number must be between 0
+ and 2^63 - 1. The default value is 0.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict[str, Any]
+ """
+ options = self._create_page_blob_options(
+ size,
+ content_settings=content_settings,
+ metadata=metadata,
+ premium_page_blob_tier=premium_page_blob_tier,
+ **kwargs)
+ try:
+ return await self._client.page_blob.create(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def create_append_blob(self, content_settings=None, metadata=None, **kwargs):
+ # type: (Optional[ContentSettings], Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]]
+ """Creates a new Append Blob.
+
+ :param ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict(str, str)
+ :keyword tags:
+ Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict[str, Any]
+ """
+ options = self._create_append_blob_options(
+ content_settings=content_settings,
+ metadata=metadata,
+ **kwargs)
+ try:
+ return await self._client.append_blob.create(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def create_snapshot(self, metadata=None, **kwargs):
+ # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]]
+ """Creates a snapshot of the blob.
+
+ A snapshot is a read-only version of a blob that's taken at a point in time.
+ It can be read, copied, or deleted, but not modified. Snapshots provide a way
+ to back up a blob as it appears at a moment in time.
+
+ A snapshot of a blob has the same name as the base blob from which the snapshot
+ is taken, with a DateTime value appended to indicate the time at which the
+ snapshot was taken.
+
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict(str, str)
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified).
+ :rtype: dict[str, Any]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common_async.py
+ :start-after: [START create_blob_snapshot]
+ :end-before: [END create_blob_snapshot]
+ :language: python
+ :dedent: 12
+ :caption: Create a snapshot of the blob.
+ """
+ options = self._create_snapshot_options(metadata=metadata, **kwargs)
+ try:
+ return await self._client.blob.create_snapshot(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs):
+ # type: (str, Optional[Dict[str, str]], bool, Any) -> Any
+ """Copies a blob asynchronously.
+
+ This operation returns a copy operation
+ object that can be used to wait on the completion of the operation,
+ as well as check status or abort the copy operation.
+ The Blob service copies blobs on a best-effort basis.
+
+ The source blob for a copy operation may be a block blob, an append blob,
+ or a page blob. If the destination blob already exists, it must be of the
+ same blob type as the source blob. Any existing destination blob will be
+ overwritten. The destination blob cannot be modified while a copy operation
+ is in progress.
+
+ When copying from a page blob, the Blob service creates a destination page
+ blob of the source blob's length, initially containing all zeroes. Then
+ the source page ranges are enumerated, and non-empty ranges are copied.
+
+ For a block blob or an append blob, the Blob service creates a committed
+ blob of zero length before returning from this operation. When copying
+ from a block blob, all committed blocks and their block IDs are copied.
+ Uncommitted blocks are not copied. At the end of the copy operation, the
+ destination blob will have the same committed block count as the source.
+
+ When copying from an append blob, all committed blocks are copied. At the
+ end of the copy operation, the destination blob will have the same committed
+ block count as the source.
+
+ For all blob types, you can call status() on the returned polling object
+ to check the status of the copy operation, or wait() to block until the
+ operation is complete. The final blob will be committed when the copy completes.
+
+ :param str source_url:
+ A URL of up to 2 KB in length that specifies a file or blob.
+ The value should be URL-encoded as it would appear in a request URI.
+ If the source is in another account, the source must either be public
+ or must be authenticated via a shared access signature. If the source
+ is public, no authentication is required.
+ Examples:
+ https://myaccount.blob.core.windows.net/mycontainer/myblob
+
+ https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=
+
+ https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
+ :param metadata:
+ Name-value pairs associated with the blob as metadata. If no name-value
+ pairs are specified, the operation will copy the metadata from the
+ source blob or file to the destination blob. If one or more name-value
+ pairs are specified, the destination blob is created with the specified
+ metadata, and metadata is not copied from the source blob or file.
+ :type metadata: dict(str, str)
+ :param bool incremental_copy:
+ Copies the snapshot of the source page blob to a destination page blob.
+ The snapshot is copied such that only the differential changes between
+ the previously copied snapshot and the current source snapshot are
+ transferred to the destination.
+ The copied snapshots are complete copies of the original snapshot and
+ can be read or copied from as usual. Defaults to False.
+ :keyword tags:
+ Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this conditional header to copy the blob only if the source
+ blob has been modified since the specified date/time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this conditional header to copy the blob only if the source blob
+ has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this conditional header to copy the blob only
+ if the destination blob has been modified since the specified date/time.
+ If the destination blob has not been modified, the Blob service returns
+ status code 412 (Precondition Failed).
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this conditional header to copy the blob only
+ if the destination blob has not been modified since the specified
+ date/time. If the destination blob has been modified, the Blob service
+ returns status code 412 (Precondition Failed).
+ :keyword str etag:
+ The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The destination match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword destination_lease:
+ The lease ID specified for this header must match the lease ID of the
+ destination blob. If the request does not include the lease ID or it is not
+ valid, the operation fails with status code 412 (Precondition Failed).
+ :paramtype destination_lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword source_lease:
+ Specify this to perform the Copy Blob operation only if
+ the lease ID given matches the active lease ID of the source blob.
+ :paramtype source_lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+ :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+ A standard blob tier value to set the blob to. For this version of the library,
+ this is only applicable to block blobs on standard storage accounts.
+ :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+ Indicates the priority with which to rehydrate an archived blob
+ :keyword bool seal_destination_blob:
+ Seal the destination append blob. This operation applies only to append blobs.
+
+ .. versionadded:: 12.4.0
+
+ :keyword bool requires_sync:
+ Enforces that the service will not return a response until the copy is complete.
+ :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status).
+ :rtype: dict[str, str or ~datetime.datetime]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common_async.py
+ :start-after: [START copy_blob_from_url]
+ :end-before: [END copy_blob_from_url]
+ :language: python
+ :dedent: 16
+ :caption: Copy a blob from a URL.
+ """
+ options = self._start_copy_from_url_options(
+ source_url=self._encode_source_url(source_url),
+ metadata=metadata,
+ incremental_copy=incremental_copy,
+ **kwargs)
+ try:
+ if incremental_copy:
+ return await self._client.page_blob.copy_incremental(**options)
+ return await self._client.blob.start_copy_from_url(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def abort_copy(self, copy_id, **kwargs):
+ # type: (Union[str, Dict[str, Any], BlobProperties], Any) -> None
+ """Abort an ongoing copy operation.
+
+ This will leave a destination blob with zero length and full metadata.
+ This will raise an error if the copy operation has already ended.
+
+ :param copy_id:
+ The copy operation to abort. This can be either an ID, or an
+ instance of BlobProperties.
+ :type copy_id: str or ~azure.storage.blob.BlobProperties
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common_async.py
+ :start-after: [START abort_copy_blob_from_url]
+ :end-before: [END abort_copy_blob_from_url]
+ :language: python
+ :dedent: 16
+ :caption: Abort copying a blob from URL.
+ """
+ options = self._abort_copy_options(copy_id, **kwargs)
+ try:
+ await self._client.blob.abort_copy_from_url(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs):
+ # type: (int, Optional[str], Any) -> BlobLeaseClient
+ """Requests a new lease.
+
+ If the blob does not have an active lease, the Blob
+ Service creates a lease on the blob and returns a new lease.
+
+ :param int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires. A non-infinite lease can be
+ between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change. Default is -1 (infinite lease).
+ :param str lease_id:
+ Proposed lease ID, in a GUID string format. The Blob Service
+ returns 400 (Invalid request) if the proposed lease ID is not
+ in the correct format.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A BlobLeaseClient object.
+ :rtype: ~azure.storage.blob.aio.BlobLeaseClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common_async.py
+ :start-after: [START acquire_lease_on_blob]
+ :end-before: [END acquire_lease_on_blob]
+ :language: python
+ :dedent: 12
+ :caption: Acquiring a lease on a blob.
+ """
+ lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore
+ await lease.acquire(lease_duration=lease_duration, **kwargs)
+ return lease
+
+ @distributed_trace_async
+ async def set_standard_blob_tier(self, standard_blob_tier, **kwargs):
+ # type: (Union[str, StandardBlobTier], Any) -> None
+ """This operation sets the tier on a block blob.
+
+ A block blob's tier determines Hot/Cool/Archive storage type.
+ This operation does not update the blob's ETag.
+
+ :param standard_blob_tier:
+ Indicates the tier to be set on the blob. Options include 'Hot', 'Cool',
+ 'Archive'. The hot tier is optimized for storing data that is accessed
+ frequently. The cool storage tier is optimized for storing data that
+ is infrequently accessed and stored for at least a month. The archive
+ tier is optimized for storing data that is rarely accessed and stored
+ for at least six months with flexible latency requirements.
+ :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
+ :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+ Indicates the priority with which to rehydrate an archived blob
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :rtype: None
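+
+ .. admonition:: Example:
+
+     A minimal usage sketch (illustrative only, not taken from the package
+     samples). The connection string, container and blob names are
+     placeholders, and the import assumes the public ``azure.storage.blob.aio``
+     client, which exposes the same method as this vendored copy:
+
+     .. code-block:: python
+
+         import asyncio
+         from azure.storage.blob.aio import BlobClient
+
+         async def main():
+             async with BlobClient.from_connection_string(
+                     "<connection-string>", "mycontainer", "myblob") as blob:
+                 # Move an existing block blob to the Cool access tier.
+                 await blob.set_standard_blob_tier("Cool")
+
+         asyncio.run(main())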
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ if standard_blob_tier is None:
+ raise ValueError("A StandardBlobTier must be specified")
+ try:
+ await self._client.blob.set_tier(
+ tier=standard_blob_tier,
+ timeout=kwargs.pop('timeout', None),
+ modified_access_conditions=mod_conditions,
+ lease_access_conditions=access_conditions,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def stage_block(
+ self, block_id, # type: str
+ data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
+ length=None, # type: Optional[int]
+ **kwargs
+ ):
+ # type: (...) -> None
+ """Creates a new block to be committed as part of a blob.
+
+ :param str block_id: A string value that identifies the block.
+ The string should be less than or equal to 64 bytes in size.
+ For a given blob, the block_id must be the same size for each block.
+ :param data: The blob data.
+ :param int length: Size of the block.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the blob. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https, as https (the default), will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient upload algorithm
+ will not be used because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword str encoding:
+ Defaults to UTF-8.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
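+
+ .. admonition:: Example:
+
+     A minimal staging sketch (illustrative; placeholder connection string and
+     names, public ``azure.storage.blob.aio`` import assumed):
+
+     .. code-block:: python
+
+         import asyncio
+         from azure.storage.blob.aio import BlobClient
+
+         async def main():
+             async with BlobClient.from_connection_string(
+                     "<connection-string>", "mycontainer", "myblob") as blob:
+                 # The staged block is not part of the blob until it is
+                 # committed with commit_block_list.
+                 await blob.stage_block("block-001", b"data for the first block")
+
+         asyncio.run(main())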
+ """
+ options = self._stage_block_options(
+ block_id,
+ data,
+ length=length,
+ **kwargs)
+ try:
+ return await self._client.block_blob.stage_block(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def stage_block_from_url(
+ self, block_id, # type: str
+ source_url, # type: str
+ source_offset=None, # type: Optional[int]
+ source_length=None, # type: Optional[int]
+ source_content_md5=None, # type: Optional[Union[bytes, bytearray]]
+ **kwargs
+ ):
+ # type: (...) -> None
+ """Creates a new block to be committed as part of a blob where
+ the contents are read from a URL.
+
+ :param str block_id: A string value that identifies the block.
+ The string should be less than or equal to 64 bytes in size.
+ For a given blob, the block_id must be the same size for each block.
+ :param str source_url: The URL.
+ :param int source_offset:
+ Start of byte range to use for the block.
+ Must be set if source length is provided.
+ :param int source_length: The size of the block in bytes.
+ :param bytearray source_content_md5:
+ Specify the md5 calculated for the range of
+ bytes that must be read from the copy source.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ options = self._stage_block_from_url_options(
+ block_id,
+ source_url=self._encode_source_url(source_url),
+ source_offset=source_offset,
+ source_length=source_length,
+ source_content_md5=source_content_md5,
+ **kwargs)
+ try:
+ return await self._client.block_blob.stage_block_from_url(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_block_list(self, block_list_type="committed", **kwargs):
+ # type: (Optional[str], Any) -> Tuple[List[BlobBlock], List[BlobBlock]]
+ """The Get Block List operation retrieves the list of blocks that have
+ been uploaded as part of a block blob.
+
+ :param str block_list_type:
+ Specifies whether to return the list of committed
+ blocks, the list of uncommitted blocks, or both lists together.
+ Possible values include: 'committed', 'uncommitted', 'all'
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A tuple of two lists - committed and uncommitted blocks
+ :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock))
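+
+ .. admonition:: Example:
+
+     A minimal sketch that lists both committed and uncommitted blocks
+     (illustrative; placeholder connection string and names, public
+     ``azure.storage.blob.aio`` import assumed):
+
+     .. code-block:: python
+
+         import asyncio
+         from azure.storage.blob.aio import BlobClient
+
+         async def main():
+             async with BlobClient.from_connection_string(
+                     "<connection-string>", "mycontainer", "myblob") as blob:
+                 committed, uncommitted = await blob.get_block_list("all")
+                 print([b.id for b in committed], [b.id for b in uncommitted])
+
+         asyncio.run(main())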
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ blocks = await self._client.block_blob.get_block_list(
+ list_type=block_list_type,
+ snapshot=self.snapshot,
+ timeout=kwargs.pop('timeout', None),
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return self._get_block_list_result(blocks)
+
+ @distributed_trace_async
+ async def commit_block_list( # type: ignore
+ self, block_list, # type: List[BlobBlock]
+ content_settings=None, # type: Optional[ContentSettings]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """The Commit Block List operation writes a blob by specifying the list of
+ block IDs that make up the blob.
+
+ :param list block_list:
+ List of BlobBlock objects identifying the blocks to commit.
+ :param ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict[str, str]
+ :keyword tags:
+ Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the page content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https, as https (the default),
+ will already validate. Note that this MD5 hash is not stored with the
+ blob.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+ A standard blob tier value to set the blob to. For this version of the library,
+ this is only applicable to block blobs on standard storage accounts.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
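+
+ .. admonition:: Example:
+
+     A minimal stage-and-commit sketch (illustrative; placeholder connection
+     string, names and block IDs, public ``azure.storage.blob`` imports
+     assumed):
+
+     .. code-block:: python
+
+         import asyncio
+         from azure.storage.blob import BlobBlock
+         from azure.storage.blob.aio import BlobClient
+
+         async def main():
+             async with BlobClient.from_connection_string(
+                     "<connection-string>", "mycontainer", "myblob") as blob:
+                 await blob.stage_block("block-001", b"hello, ")
+                 await blob.stage_block("block-002", b"world")
+                 # Committing the list in order makes the staged blocks the
+                 # blob's content.
+                 await blob.commit_block_list(
+                     [BlobBlock(block_id="block-001"), BlobBlock(block_id="block-002")])
+
+         asyncio.run(main())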
+ """
+ options = self._commit_block_list_options(
+ block_list,
+ content_settings=content_settings,
+ metadata=metadata,
+ **kwargs)
+ try:
+ return await self._client.block_blob.commit_block_list(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs):
+ # type: (Union[str, PremiumPageBlobTier], **Any) -> None
+ """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts.
+
+ :param premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+ :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :rtype: None
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ if premium_page_blob_tier is None:
+ raise ValueError("A PremiumPageBlobTier must be specified")
+ try:
+ await self._client.blob.set_tier(
+ tier=premium_page_blob_tier,
+ timeout=kwargs.pop('timeout', None),
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def set_blob_tags(self, tags=None, **kwargs):
+ # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+ """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot.
+ Each call to this operation replaces all existing tags attached to the blob. To remove all
+ tags from the blob, call this operation with no tags set.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2019-12-12'.
+
+ :param tags:
+ Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+ :type tags: dict(str, str)
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to set tags on.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the tags content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https, as https (the default),
+ will already validate. Note that this MD5 hash is not stored with the
+ blob.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified)
+ :rtype: Dict[str, Any]
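+
+ .. admonition:: Example:
+
+     A minimal sketch that replaces all tags on the blob (illustrative;
+     placeholder connection string, names and tag values, public
+     ``azure.storage.blob.aio`` import assumed):
+
+     .. code-block:: python
+
+         import asyncio
+         from azure.storage.blob.aio import BlobClient
+
+         async def main():
+             async with BlobClient.from_connection_string(
+                     "<connection-string>", "mycontainer", "myblob") as blob:
+                 # Each call replaces the entire tag set on the blob.
+                 await blob.set_blob_tags({"project": "alpha", "team": "storage"})
+
+         asyncio.run(main())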
+ """
+ options = self._set_blob_tags_options(tags=tags, **kwargs)
+ try:
+ return await self._client.blob.set_tags(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_blob_tags(self, **kwargs):
+ # type: (**Any) -> Dict[str, str]
+ """The Get Tags operation enables users to get tags on a blob or specific blob version, but not snapshot.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2019-12-12'.
+
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to get tags from.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Key value pairs of blob tags.
+ :rtype: Dict[str, str]
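+
+ .. admonition:: Example:
+
+     A minimal retrieval sketch (illustrative; placeholder connection string
+     and names, public ``azure.storage.blob.aio`` import assumed):
+
+     .. code-block:: python
+
+         import asyncio
+         from azure.storage.blob.aio import BlobClient
+
+         async def main():
+             async with BlobClient.from_connection_string(
+                     "<connection-string>", "mycontainer", "myblob") as blob:
+                 tags = await blob.get_blob_tags()
+                 print(tags)  # e.g. {'project': 'alpha', 'team': 'storage'}
+
+         asyncio.run(main())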
+ """
+ options = self._get_blob_tags_options(**kwargs)
+ try:
+ _, tags = await self._client.blob.get_tags(**options)
+ return parse_tags(tags) # pylint: disable=protected-access
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_page_ranges( # type: ignore
+ self, offset=None, # type: Optional[int]
+ length=None, # type: Optional[int]
+ previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]]
+ **kwargs
+ ):
+ # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+ """Returns the list of valid page ranges for a Page Blob or snapshot
+ of a page blob.
+
+ :param int offset:
+ Start of byte range to use for getting valid page ranges.
+ If no length is given, all bytes after the offset will be searched.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int length:
+ Number of bytes to use for getting valid page ranges.
+ If length is given, offset must be provided.
+ This range will return valid page ranges from the offset start up to
+ the specified length.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param str previous_snapshot_diff:
+ The snapshot diff parameter that contains an opaque DateTime value that
+ specifies a previous blob snapshot to be compared
+ against a more recent snapshot or the current blob.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns:
+ A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+ The first element is the list of filled page ranges and the second element
+ is the list of cleared page ranges.
+ :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
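+
+ .. admonition:: Example:
+
+     A minimal sketch that prints the filled page ranges (illustrative;
+     placeholder connection string and names, public ``azure.storage.blob.aio``
+     import assumed):
+
+     .. code-block:: python
+
+         import asyncio
+         from azure.storage.blob.aio import BlobClient
+
+         async def main():
+             async with BlobClient.from_connection_string(
+                     "<connection-string>", "mycontainer", "mypageblob") as blob:
+                 filled, cleared = await blob.get_page_ranges()
+                 for page_range in filled:
+                     print(page_range["start"], page_range["end"])
+
+         asyncio.run(main())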
+ """
+ options = self._get_page_ranges_options(
+ offset=offset,
+ length=length,
+ previous_snapshot_diff=previous_snapshot_diff,
+ **kwargs)
+ try:
+ if previous_snapshot_diff:
+ ranges = await self._client.page_blob.get_page_ranges_diff(**options)
+ else:
+ ranges = await self._client.page_blob.get_page_ranges(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return get_page_ranges_result(ranges)
+
+ @distributed_trace_async
+ async def get_page_range_diff_for_managed_disk(
+ self, previous_snapshot_url, # type: str
+ offset=None, # type: Optional[int]
+ length=None, # type: Optional[int]
+ **kwargs
+ ):
+ # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+ """Returns the list of valid page ranges for a managed disk or snapshot.
+
+ .. note::
+ This operation is only available for managed disk accounts.
+
+ .. versionadded:: 12.2.0
+ This operation was introduced in API version '2019-07-07'.
+
+ :param str previous_snapshot_url:
+ Specifies the URL of a previous snapshot of the managed disk.
+ The response will only contain pages that were changed between the target blob and
+ its previous snapshot.
+ :param int offset:
+ Start of byte range to use for getting valid page ranges.
+ If no length is given, all bytes after the offset will be searched.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int length:
+ Number of bytes to use for getting valid page ranges.
+ If length is given, offset must be provided.
+ This range will return valid page ranges from the offset start up to
+ the specified length.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns:
+ A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+ The first element is the list of filled page ranges and the second element
+ is the list of cleared page ranges.
+ :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+ """
+ options = self._get_page_ranges_options(
+ offset=offset,
+ length=length,
+ prev_snapshot_url=previous_snapshot_url,
+ **kwargs)
+ try:
+ ranges = await self._client.page_blob.get_page_ranges_diff(**options)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return get_page_ranges_result(ranges)
+
+ @distributed_trace_async
+ async def set_sequence_number( # type: ignore
+ self, sequence_number_action, # type: Union[str, SequenceNumberAction]
+ sequence_number=None, # type: Optional[str]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets the blob sequence number.
+
+ :param str sequence_number_action:
+ This property indicates how the service should modify the blob's sequence
+ number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
+ :param str sequence_number:
+ This property sets the blob's sequence number. The sequence number is a
+ user-controlled property that you can use to track requests and manage
+ concurrency issues.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
+ """
+ options = self._set_sequence_number_options(
+ sequence_number_action, sequence_number=sequence_number, **kwargs)
+ try:
+ return await self._client.page_blob.update_sequence_number(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def resize_blob(self, size, **kwargs):
+ # type: (int, Any) -> Dict[str, Union[str, datetime]]
+ """Resizes a page blob to the specified size.
+
+ If the specified value is less than the current size of the blob,
+ then all pages above the specified value are cleared.
+
+ :param int size:
+ Size used to resize blob. Maximum size for a page blob is up to 1 TB.
+ The page blob size must be aligned to a 512-byte boundary.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
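+
+ .. admonition:: Example:
+
+     A minimal sketch that grows a page blob (illustrative; placeholder
+     connection string and names, public ``azure.storage.blob.aio`` import
+     assumed):
+
+     .. code-block:: python
+
+         import asyncio
+         from azure.storage.blob.aio import BlobClient
+
+         async def main():
+             async with BlobClient.from_connection_string(
+                     "<connection-string>", "mycontainer", "mypageblob") as blob:
+                 await blob.create_page_blob(1024)
+                 # The new size must remain aligned to a 512-byte boundary.
+                 await blob.resize_blob(2048)
+
+         asyncio.run(main())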
+ """
+ options = self._resize_blob_options(size, **kwargs)
+ try:
+ return await self._client.page_blob.resize(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def upload_page( # type: ignore
+ self, page, # type: bytes
+ offset, # type: int
+ length, # type: int
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """The Upload Pages operation writes a range of pages to a page blob.
+
+ :param bytes page:
+ Content of the page.
+ :param int offset:
+ Start of byte range to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int length:
+ Number of bytes to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the page content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https, as https (the default),
+ will already validate. Note that this MD5 hash is not stored with the
+ blob.
+ :keyword int if_sequence_number_lte:
+ If the blob's sequence number is less than or equal to
+ the specified value, the request proceeds; otherwise it fails.
+ :keyword int if_sequence_number_lt:
+ If the blob's sequence number is less than the specified
+ value, the request proceeds; otherwise it fails.
+ :keyword int if_sequence_number_eq:
+ If the blob's sequence number is equal to the specified
+ value, the request proceeds; otherwise it fails.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str encoding:
+ Defaults to UTF-8.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
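+
+ .. admonition:: Example:
+
+     A minimal sketch that writes one 512-byte page (illustrative; placeholder
+     connection string, names and content, public ``azure.storage.blob.aio``
+     import assumed):
+
+     .. code-block:: python
+
+         import asyncio
+         from azure.storage.blob.aio import BlobClient
+
+         async def main():
+             async with BlobClient.from_connection_string(
+                     "<connection-string>", "mycontainer", "mypageblob") as blob:
+                 await blob.create_page_blob(1024)
+                 # Offset and length must both be multiples of 512.
+                 await blob.upload_page(b"x" * 512, offset=0, length=512)
+
+         asyncio.run(main())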
+ """
+ options = self._upload_page_options(
+ page=page,
+ offset=offset,
+ length=length,
+ **kwargs)
+ try:
+ return await self._client.page_blob.upload_pages(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def upload_pages_from_url(self, source_url, # type: str
+ offset, # type: int
+ length, # type: int
+ source_offset, # type: int
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ """
+ The Upload Pages operation writes a range of pages to a page blob where
+ the contents are read from a URL.
+
+ :param str source_url:
+ The URL of the source data. It can point to any Azure Blob or File that is either public or has a
+ shared access signature attached.
+ :param int offset:
+ Start of byte range to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int length:
+ Number of bytes to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int source_offset:
+ This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+ The service will read the same number of bytes as the destination range (length-offset).
+ :keyword bytes source_content_md5:
+ If given, the service will calculate the MD5 hash of the block content and compare against this value.
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the source resource has been modified since the specified time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the source resource has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword int if_sequence_number_lte:
+ If the blob's sequence number is less than or equal to
+ the specified value, the request proceeds; otherwise it fails.
+ :keyword int if_sequence_number_lt:
+ If the blob's sequence number is less than the specified
+ value, the request proceeds; otherwise it fails.
+ :keyword int if_sequence_number_eq:
+ If the blob's sequence number is equal to the specified
+ value, the request proceeds; otherwise it fails.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The destination match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ """
+
+ options = self._upload_pages_from_url_options(
+ source_url=self._encode_source_url(source_url),
+ offset=offset,
+ length=length,
+ source_offset=source_offset,
+ **kwargs
+ )
+ try:
+ return await self._client.page_blob.upload_pages_from_url(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def clear_page(self, offset, length, **kwargs):
+ # type: (int, int, Any) -> Dict[str, Union[str, datetime]]
+ """Clears a range of pages.
+
+ :param int offset:
+ Start of byte range to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int length:
+ Number of bytes to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword int if_sequence_number_lte:
+ If the blob's sequence number is less than or equal to
+ the specified value, the request proceeds; otherwise it fails.
+ :keyword int if_sequence_number_lt:
+ If the blob's sequence number is less than the specified
+ value, the request proceeds; otherwise it fails.
+ :keyword int if_sequence_number_eq:
+ If the blob's sequence number is equal to the specified
+ value, the request proceeds; otherwise it fails.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
+ """
+ options = self._clear_page_options(offset, length, **kwargs)
+ try:
+ return await self._client.page_blob.clear_pages(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def append_block( # type: ignore
+ self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
+ length=None, # type: Optional[int]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime, int]]
+ """Commits a new block of data to the end of the existing append blob.
+
+ :param data:
+ Content of the block.
+ :param int length:
+ Size of the block in bytes.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the block content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https, as https (the default),
+ will already validate. Note that this MD5 hash is not stored with the
+ blob.
+ :keyword int maxsize_condition:
+ Optional conditional header. The max length in bytes permitted for
+ the append blob. If the Append Block operation would cause the blob
+ to exceed that limit or if the blob size is already greater than the
+ value specified in this header, the request will fail with
+ MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+ :keyword int appendpos_condition:
+ Optional conditional header, used only for the Append Block operation.
+ A number indicating the byte offset to compare. Append Block will
+ succeed only if the append position is equal to this number. If it
+ is not, the request will fail with the AppendPositionConditionNotMet error
+ (HTTP status code 412 - Precondition Failed).
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword str encoding:
+ Defaults to UTF-8.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+ :rtype: dict(str, Any)
+ """
+ options = self._append_block_options(
+ data,
+ length=length,
+ **kwargs
+ )
+ try:
+ return await self._client.append_blob.append_block(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
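+    # Hedged usage sketch (not part of the vendored SDK): appending a block to an
+    # existing append blob while capping its total size. The connection string,
+    # container and blob names are illustrative assumptions.
+    #
+    #     blob = BlobClient.from_connection_string(conn_str, "logs", "app.log")
+    #     await blob.create_append_blob()
+    #     await blob.append_block(b"one more line\n", maxsize_condition=4 * 1024 * 1024)
+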
+ @distributed_trace_async()
+ async def append_block_from_url(self, copy_source_url, # type: str
+ source_offset=None, # type: Optional[int]
+ source_length=None, # type: Optional[int]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime, int]]
+ """
+ Creates a new block to be committed as part of a blob, where the contents are read from a source url.
+
+ :param str copy_source_url:
+            The URL of the source data. It can point to any Azure Blob or File that is either public or has a
+ shared access signature attached.
+ :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+ :param int source_length:
+ This indicates the end of the range of bytes that has to be taken from the copy source.
+ :keyword bytearray source_content_md5:
+ If given, the service will calculate the MD5 hash of the block content and compare against this value.
+ :keyword int maxsize_condition:
+ Optional conditional header. The max length in bytes permitted for
+ the append blob. If the Append Block operation would cause the blob
+ to exceed that limit or if the blob size is already greater than the
+ value specified in this header, the request will fail with
+ MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+ :keyword int appendpos_condition:
+ Optional conditional header, used only for the Append Block operation.
+ A number indicating the byte offset to compare. Append Block will
+ succeed only if the append position is equal to this number. If it
+ is not, the request will fail with the
+ AppendPositionConditionNotMet error
+ (HTTP status code 412 - Precondition Failed).
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The destination match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the source resource has been modified since the specified time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the source resource has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ """
+ options = self._append_block_from_url_options(
+ copy_source_url=self._encode_source_url(copy_source_url),
+ source_offset=source_offset,
+ source_length=source_length,
+ **kwargs
+ )
+ try:
+ return await self._client.append_blob.append_block_from_url(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
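+    # Hedged usage sketch (not part of the vendored SDK): appending a range of bytes
+    # read from a source blob. source_sas_url is an assumed, pre-built URL that
+    # carries a SAS token granting read access on the source.
+    #
+    #     await blob.append_block_from_url(source_sas_url, source_offset=0, source_length=511)
+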
+ @distributed_trace_async()
+ async def seal_append_blob(self, **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime, int]]
+ """The Seal operation seals the Append Blob to make it read-only.
+
+ .. versionadded:: 12.4.0
+
+ :keyword int appendpos_condition:
+ Optional conditional header, used only for the Append Block operation.
+ A number indicating the byte offset to compare. Append Block will
+ succeed only if the append position is equal to this number. If it
+ is not, the request will fail with the AppendPositionConditionNotMet error
+ (HTTP status code 412 - Precondition Failed).
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+ :rtype: dict(str, Any)
+ """
+ options = self._seal_append_blob_options(**kwargs)
+ try:
+ return await self._client.append_blob.seal(**options) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
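+    # Hedged usage sketch (not part of the vendored SDK): sealing the append blob once
+    # writing is complete so later append_block calls are rejected by the service.
+    #
+    #     await blob.seal_append_blob()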
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_blob_service_client_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_blob_service_client_async.py
new file mode 100644
index 00000000000..5d55df945ec
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_blob_service_client_async.py
@@ -0,0 +1,646 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+import functools
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, Iterable, Dict, List,
+ TYPE_CHECKING
+)
+
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.pipeline import AsyncPipeline
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.async_paging import AsyncItemPaged
+
+from .._shared.models import LocationMode
+from .._shared.policies_async import ExponentialRetry
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper
+from .._shared.response_handlers import return_response_headers, process_storage_error
+from .._shared.parser import _to_utc_datetime
+from .._shared.response_handlers import parse_to_internal_user_delegation_key
+from .._generated import VERSION
+from .._generated.aio import AzureBlobStorage
+from .._generated.models import StorageErrorException, StorageServiceProperties, KeyInfo
+from .._blob_service_client import BlobServiceClient as BlobServiceClientBase
+from ._container_client_async import ContainerClient
+from ._blob_client_async import BlobClient
+from .._models import ContainerProperties
+from .._deserialize import service_stats_deserialize, service_properties_deserialize
+from .._serialize import get_api_version
+from ._models import ContainerPropertiesPaged, FilteredBlobPaged
+
+if TYPE_CHECKING:
+ from datetime import datetime
+ from azure.core.pipeline.transport import HttpTransport
+ from azure.core.pipeline.policies import HTTPPolicy
+ from .._shared.models import AccountSasPermissions, ResourceTypes, UserDelegationKey
+ from ._lease_async import BlobLeaseClient
+ from .._models import (
+ BlobProperties,
+ PublicAccess,
+ BlobAnalyticsLogging,
+ Metrics,
+ CorsRule,
+ RetentionPolicy,
+ StaticWebsite,
+ )
+
+
+class BlobServiceClient(AsyncStorageAccountHostsMixin, BlobServiceClientBase):
+ """A client to interact with the Blob Service at the account level.
+
+ This client provides operations to retrieve and configure the account properties
+ as well as list, create and delete containers within the account.
+ For operations relating to a specific container or blob, clients for those entities
+ can also be retrieved using the `get_client` functions.
+
+ :param str account_url:
+ The URL to the blob storage account. Any other entities included
+ in the URL path (e.g. container or blob) will be discarded. This URL can be optionally
+ authenticated with a SAS token.
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the URL already has a SAS token, specifying an explicit credential will take priority.
+ :keyword str api_version:
+ The Storage API version to use for requests. Default value is '2019-07-07'.
+ Setting to an older version may result in reduced feature compatibility.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str secondary_hostname:
+ The hostname of the secondary endpoint.
+ :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+ Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+ uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+ the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+ :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+ algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+ :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+ :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
+        any part exceeding this size will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
+ :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+ or 4MB.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_authentication_async.py
+ :start-after: [START create_blob_service_client]
+ :end-before: [END create_blob_service_client]
+ :language: python
+ :dedent: 8
+ :caption: Creating the BlobServiceClient with account url and credential.
+
+ .. literalinclude:: ../samples/blob_samples_authentication_async.py
+ :start-after: [START create_blob_service_client_oauth]
+ :end-before: [END create_blob_service_client_oauth]
+ :language: python
+ :dedent: 8
+ :caption: Creating the BlobServiceClient with Azure Identity credentials.
+ """
+
+ def __init__(
+ self, account_url, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+ super(BlobServiceClient, self).__init__(
+ account_url,
+ credential=credential,
+ **kwargs)
+ self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline)
+ self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access
+ self._loop = kwargs.get('loop', None)
+
+ @distributed_trace_async
+ async def get_user_delegation_key(self, key_start_time, # type: datetime
+ key_expiry_time, # type: datetime
+ **kwargs # type: Any
+ ):
+ # type: (...) -> UserDelegationKey
+ """
+ Obtain a user delegation key for the purpose of signing SAS tokens.
+ A token credential must be present on the service object for this request to succeed.
+
+ :param ~datetime.datetime key_start_time:
+ A DateTime value. Indicates when the key becomes valid.
+ :param ~datetime.datetime key_expiry_time:
+ A DateTime value. Indicates when the key stops being valid.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: The user delegation key.
+ :rtype: ~azure.storage.blob.UserDelegationKey
+ """
+ key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time))
+ timeout = kwargs.pop('timeout', None)
+ try:
+ user_delegation_key = await self._client.service.get_user_delegation_key(key_info=key_info,
+ timeout=timeout,
+ **kwargs) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore
+
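+    # Hedged usage sketch (not part of the vendored SDK): obtaining a user delegation
+    # key valid for one hour. The service client is assumed to have been constructed
+    # with a token credential (for example, one from azure.identity.aio).
+    #
+    #     from datetime import datetime, timedelta
+    #     start = datetime.utcnow()
+    #     delegation_key = await service.get_user_delegation_key(start, start + timedelta(hours=1))
+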
+ @distributed_trace_async
+ async def get_account_information(self, **kwargs):
+ # type: (Any) -> Dict[str, str]
+ """Gets information related to the storage account.
+
+ The information can also be retrieved if the user has a SAS to a container or blob.
+ The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+ :returns: A dict of account information (SKU and account type).
+ :rtype: dict(str, str)
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service_async.py
+ :start-after: [START get_blob_service_account_info]
+ :end-before: [END get_blob_service_account_info]
+ :language: python
+ :dedent: 12
+ :caption: Getting account information for the blob service.
+ """
+ try:
+ return await self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_service_stats(self, **kwargs):
+ # type: (Any) -> Dict[str, Any]
+ """Retrieves statistics related to replication for the Blob service.
+
+ It is only available when read-access geo-redundant replication is enabled for
+ the storage account.
+
+ With geo-redundant replication, Azure Storage maintains your data durable
+ in two locations. In both locations, Azure Storage constantly maintains
+ multiple healthy replicas of your data. The location where you read,
+ create, update, or delete data is the primary storage account location.
+ The primary location exists in the region you choose at the time you
+        create an account via the Azure portal or the classic portal, for
+ example, North Central US. The location to which your data is replicated
+ is the secondary location. The secondary location is automatically
+ determined based on the location of the primary; it is in a second data
+ center that resides in the same region as the primary location. Read-only
+ access is available from the secondary location, if read-access geo-redundant
+ replication is enabled for your storage account.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: The blob service stats.
+ :rtype: Dict[str, Any]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service_async.py
+ :start-after: [START get_blob_service_stats]
+ :end-before: [END get_blob_service_stats]
+ :language: python
+ :dedent: 12
+ :caption: Getting service stats for the blob service.
+ """
+ timeout = kwargs.pop('timeout', None)
+ try:
+ stats = await self._client.service.get_statistics( # type: ignore
+ timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
+ return service_stats_deserialize(stats)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
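+    # Hedged usage sketch (not part of the vendored SDK): inspecting the geo-replication
+    # status for an account with read-access geo-redundant replication enabled. The
+    # exact keys of the returned dict are an assumption based on the deserialized stats.
+    #
+    #     stats = await service.get_service_stats()
+    #     print(stats.get("geo_replication"))
+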
+ @distributed_trace_async
+ async def get_service_properties(self, **kwargs):
+ # type: (Any) -> Dict[str, Any]
+ """Gets the properties of a storage account's Blob service, including
+ Azure Storage Analytics.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An object containing blob service properties such as
+ analytics logging, hour/minute metrics, cors rules, etc.
+ :rtype: Dict[str, Any]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service_async.py
+ :start-after: [START get_blob_service_properties]
+ :end-before: [END get_blob_service_properties]
+ :language: python
+ :dedent: 12
+ :caption: Getting service properties for the blob service.
+ """
+ timeout = kwargs.pop('timeout', None)
+ try:
+ service_props = await self._client.service.get_properties(timeout=timeout, **kwargs)
+ return service_properties_deserialize(service_props)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def set_service_properties(
+ self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging]
+ hour_metrics=None, # type: Optional[Metrics]
+ minute_metrics=None, # type: Optional[Metrics]
+ cors=None, # type: Optional[List[CorsRule]]
+ target_version=None, # type: Optional[str]
+ delete_retention_policy=None, # type: Optional[RetentionPolicy]
+ static_website=None, # type: Optional[StaticWebsite]
+ **kwargs
+ ):
+ # type: (...) -> None
+ """Sets the properties of a storage account's Blob service, including
+ Azure Storage Analytics.
+
+ If an element (e.g. analytics_logging) is left as None, the
+ existing settings on the service for that functionality are preserved.
+
+ :param analytics_logging:
+ Groups the Azure Analytics Logging settings.
+ :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging
+ :param hour_metrics:
+ The hour metrics settings provide a summary of request
+ statistics grouped by API in hourly aggregates for blobs.
+ :type hour_metrics: ~azure.storage.blob.Metrics
+ :param minute_metrics:
+ The minute metrics settings provide request statistics
+ for each minute for blobs.
+ :type minute_metrics: ~azure.storage.blob.Metrics
+ :param cors:
+ You can include up to five CorsRule elements in the
+ list. If an empty list is specified, all CORS rules will be deleted,
+ and CORS will be disabled for the service.
+ :type cors: list[~azure.storage.blob.CorsRule]
+ :param str target_version:
+ Indicates the default version to use for requests if an incoming
+ request's version is not specified.
+ :param delete_retention_policy:
+ The delete retention policy specifies whether to retain deleted blobs.
+ It also specifies the number of days and versions of blob to keep.
+ :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy
+ :param static_website:
+ Specifies whether the static website feature is enabled,
+ and if yes, indicates the index document and 404 error document to use.
+ :type static_website: ~azure.storage.blob.StaticWebsite
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service_async.py
+ :start-after: [START set_blob_service_properties]
+ :end-before: [END set_blob_service_properties]
+ :language: python
+ :dedent: 12
+ :caption: Setting service properties for the blob service.
+ """
+ if all(parameter is None for parameter in [
+ analytics_logging, hour_metrics, minute_metrics, cors,
+ target_version, delete_retention_policy, static_website]):
+ raise ValueError("set_service_properties should be called with at least one parameter")
+
+ props = StorageServiceProperties(
+ logging=analytics_logging,
+ hour_metrics=hour_metrics,
+ minute_metrics=minute_metrics,
+ cors=cors,
+ default_service_version=target_version,
+ delete_retention_policy=delete_retention_policy,
+ static_website=static_website
+ )
+ timeout = kwargs.pop('timeout', None)
+ try:
+ await self._client.service.set_properties(props, timeout=timeout, **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
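+    # Hedged usage sketch (not part of the vendored SDK): enabling a 5-day blob soft
+    # delete (delete retention) policy and leaving all other service settings as-is.
+    # The RetentionPolicy import path refers to the public azure-storage-blob models
+    # and is an assumption in this vendored context.
+    #
+    #     from azure.storage.blob import RetentionPolicy
+    #     await service.set_service_properties(
+    #         delete_retention_policy=RetentionPolicy(enabled=True, days=5))
+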
+ @distributed_trace
+ def list_containers(
+ self, name_starts_with=None, # type: Optional[str]
+ include_metadata=False, # type: Optional[bool]
+ **kwargs
+ ):
+ # type: (...) -> AsyncItemPaged[ContainerProperties]
+ """Returns a generator to list the containers under the specified account.
+
+ The generator will lazily follow the continuation tokens returned by
+ the service and stop when all containers have been returned.
+
+ :param str name_starts_with:
+ Filters the results to return only containers whose names
+ begin with the specified prefix.
+ :param bool include_metadata:
+            Specifies that container metadata be returned in the response.
+ The default value is `False`.
+ :keyword bool include_deleted:
+            Specifies that deleted containers be returned in the response. This is for accounts with
+            container restore enabled. The default value is `False`.
+ .. versionadded:: 12.4.0
+ :keyword int results_per_page:
+ The maximum number of container names to retrieve per API
+            call. If the request does not specify, the server will return up to 5,000 items.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An iterable (auto-paging) of ContainerProperties.
+ :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.ContainerProperties]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service_async.py
+ :start-after: [START bsc_list_containers]
+ :end-before: [END bsc_list_containers]
+ :language: python
+ :dedent: 16
+ :caption: Listing the containers in the blob service.
+ """
+ include = ['metadata'] if include_metadata else []
+ include_deleted = kwargs.pop('include_deleted', None)
+ if include_deleted:
+ include.append("deleted")
+ timeout = kwargs.pop('timeout', None)
+ results_per_page = kwargs.pop('results_per_page', None)
+ command = functools.partial(
+ self._client.service.list_containers_segment,
+ prefix=name_starts_with,
+ include=include,
+ timeout=timeout,
+ **kwargs)
+ return AsyncItemPaged(
+ command,
+ prefix=name_starts_with,
+ results_per_page=results_per_page,
+ page_iterator_class=ContainerPropertiesPaged
+ )
+
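+    # Hedged usage sketch (not part of the vendored SDK): listing containers whose
+    # names start with "logs", including soft-deleted ones. The prefix is illustrative.
+    #
+    #     async for container in service.list_containers(name_starts_with="logs",
+    #                                                     include_deleted=True):
+    #         print(container.name)
+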
+ @distributed_trace
+ def find_blobs_by_tags(self, filter_expression, **kwargs):
+ # type: (str, **Any) -> AsyncItemPaged[FilteredBlob]
+ """The Filter Blobs operation enables callers to list blobs across all
+ containers whose tags match a given search expression. Filter blobs
+ searches across all containers within a storage account but can be
+ scoped within the expression to a single container.
+
+ :param str filter_expression:
+            The expression to find blobs whose tags match the specified condition.
+            e.g. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'"
+            To specify a container, e.g. "@container='containerName' and \"Name\"='C'"
+ :keyword int results_per_page:
+ The max result per page when paginating.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An iterable (auto-paging) response of BlobProperties.
+ :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob]
+ """
+
+ results_per_page = kwargs.pop('results_per_page', None)
+ timeout = kwargs.pop('timeout', None)
+ command = functools.partial(
+ self._client.service.filter_blobs,
+ where=filter_expression,
+ timeout=timeout,
+ **kwargs)
+ return AsyncItemPaged(
+ command, results_per_page=results_per_page,
+ page_iterator_class=FilteredBlobPaged)
+
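+    # Hedged usage sketch (not part of the vendored SDK): finding blobs across the
+    # account by tag. The tag name and value are illustrative.
+    #
+    #     async for filtered in service.find_blobs_by_tags("\"project\"='preview'"):
+    #         print(filtered.name)
+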
+ @distributed_trace_async
+ async def create_container(
+ self, name, # type: str
+ metadata=None, # type: Optional[Dict[str, str]]
+ public_access=None, # type: Optional[Union[PublicAccess, str]]
+ **kwargs
+ ):
+ # type: (...) -> ContainerClient
+ """Creates a new container under the specified account.
+
+ If the container with the same name already exists, a ResourceExistsError will
+ be raised. This method returns a client with which to interact with the newly
+ created container.
+
+ :param str name: The name of the container to create.
+ :param metadata:
+ A dict with name-value pairs to associate with the
+ container as metadata. Example: `{'Category':'test'}`
+ :type metadata: dict(str, str)
+ :param public_access:
+ Possible values include: 'container', 'blob'.
+ :type public_access: str or ~azure.storage.blob.PublicAccess
+ :keyword container_encryption_scope:
+ Specifies the default encryption scope to set on the container and use for
+ all future writes.
+
+ .. versionadded:: 12.2.0
+
+ :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: ~azure.storage.blob.aio.ContainerClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service_async.py
+ :start-after: [START bsc_create_container]
+ :end-before: [END bsc_create_container]
+ :language: python
+ :dedent: 16
+ :caption: Creating a container in the blob service.
+ """
+ container = self.get_container_client(name)
+ timeout = kwargs.pop('timeout', None)
+ kwargs.setdefault('merge_span', True)
+ await container.create_container(
+ metadata=metadata, public_access=public_access, timeout=timeout, **kwargs)
+ return container
+
+ @distributed_trace_async
+ async def delete_container(
+ self, container, # type: Union[ContainerProperties, str]
+ lease=None, # type: Optional[Union[BlobLeaseClient, str]]
+ **kwargs
+ ):
+ # type: (...) -> None
+ """Marks the specified container for deletion.
+
+ The container and any blobs contained within it are later deleted during garbage collection.
+ If the container is not found, a ResourceNotFoundError will be raised.
+
+ :param container:
+ The container to delete. This can either be the name of the container,
+ or an instance of ContainerProperties.
+ :type container: str or ~azure.storage.blob.ContainerProperties
+ :param lease:
+ If specified, delete_container only succeeds if the
+ container's lease is active and matches this ID.
+ Required if the container has an active lease.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service_async.py
+ :start-after: [START bsc_delete_container]
+ :end-before: [END bsc_delete_container]
+ :language: python
+ :dedent: 16
+ :caption: Deleting a container in the blob service.
+ """
+ container = self.get_container_client(container) # type: ignore
+ kwargs.setdefault('merge_span', True)
+ timeout = kwargs.pop('timeout', None)
+ await container.delete_container( # type: ignore
+ lease=lease,
+ timeout=timeout,
+ **kwargs)
+
+ @distributed_trace_async
+ async def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs):
+        # type: (str, str, **Any) -> ContainerClient
+        """Restores a soft-deleted container.
+
+ Operation will only be successful if used within the specified number of days
+ set in the delete retention policy.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2019-12-12'.
+
+ :param str deleted_container_name:
+ Specifies the name of the deleted container to restore.
+ :param str deleted_container_version:
+ Specifies the version of the deleted container to restore.
+ :keyword str new_name:
+ The new name for the deleted container to be restored to.
+            If not specified, deleted_container_name will be used as the restored container name.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: ~azure.storage.blob.aio.ContainerClient
+ """
+ new_name = kwargs.pop('new_name', None)
+ container = self.get_container_client(new_name or deleted_container_name)
+ try:
+ await container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access
+ deleted_container_version=deleted_container_version,
+ timeout=kwargs.pop('timeout', None), **kwargs)
+ return container
+ except StorageErrorException as error:
+ process_storage_error(error)
+
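+    # Hedged usage sketch (not part of the vendored SDK): restoring a soft-deleted
+    # container discovered via list_containers(include_deleted=True). The name and
+    # version values are illustrative.
+    #
+    #     restored = await service.undelete_container("myfilesystem", "01D60F8BB59A4652")
+    #     props = await restored.get_container_properties()
+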
+ def get_container_client(self, container):
+ # type: (Union[ContainerProperties, str]) -> ContainerClient
+ """Get a client to interact with the specified container.
+
+ The container need not already exist.
+
+ :param container:
+ The container. This can either be the name of the container,
+ or an instance of ContainerProperties.
+ :type container: str or ~azure.storage.blob.ContainerProperties
+ :returns: A ContainerClient.
+ :rtype: ~azure.storage.blob.aio.ContainerClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service_async.py
+ :start-after: [START bsc_get_container_client]
+ :end-before: [END bsc_get_container_client]
+ :language: python
+ :dedent: 12
+ :caption: Getting the container client to interact with a specific container.
+ """
+ try:
+ container_name = container.name
+ except AttributeError:
+ container_name = container
+ _pipeline = AsyncPipeline(
+ transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return ContainerClient(
+ self.url, container_name=container_name,
+ credential=self.credential, api_version=self.api_version, _configuration=self._config,
+ _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+ require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function, loop=self._loop)
+
+ def get_blob_client(
+ self, container, # type: Union[ContainerProperties, str]
+ blob, # type: Union[BlobProperties, str]
+ snapshot=None # type: Optional[Union[Dict[str, Any], str]]
+ ):
+ # type: (...) -> BlobClient
+ """Get a client to interact with the specified blob.
+
+ The blob need not already exist.
+
+ :param container:
+ The container that the blob is in. This can either be the name of the container,
+ or an instance of ContainerProperties.
+ :type container: str or ~azure.storage.blob.ContainerProperties
+ :param blob:
+ The blob with which to interact. This can either be the name of the blob,
+ or an instance of BlobProperties.
+ :type blob: str or ~azure.storage.blob.BlobProperties
+ :param snapshot:
+ The optional blob snapshot on which to operate. This can either be the ID of the snapshot,
+ or a dictionary output returned by
+ :func:`~azure.storage.blob.aio.BlobClient.create_snapshot()`.
+ :type snapshot: str or dict(str, Any)
+ :returns: A BlobClient.
+ :rtype: ~azure.storage.blob.aio.BlobClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service_async.py
+ :start-after: [START bsc_get_blob_client]
+ :end-before: [END bsc_get_blob_client]
+ :language: python
+ :dedent: 16
+ :caption: Getting the blob client to interact with a specific blob.
+ """
+ try:
+ container_name = container.name
+ except AttributeError:
+ container_name = container
+
+ try:
+ blob_name = blob.name
+ except AttributeError:
+ blob_name = blob
+ _pipeline = AsyncPipeline(
+ transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return BlobClient( # type: ignore
+ self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot,
+ credential=self.credential, api_version=self.api_version, _configuration=self._config,
+ _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+ require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function, loop=self._loop)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_container_client_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_container_client_async.py
new file mode 100644
index 00000000000..730a1fd201b
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_container_client_async.py
@@ -0,0 +1,1121 @@
+# pylint: disable=too-many-lines
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+import functools
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, Iterable, AnyStr, Dict, List, IO, AsyncIterator,
+ TYPE_CHECKING
+)
+
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.pipeline import AsyncPipeline
+from azure.core.pipeline.transport import AsyncHttpResponse
+
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper
+from .._shared.policies_async import ExponentialRetry
+from .._shared.request_handlers import add_metadata_headers, serialize_iso
+from .._shared.response_handlers import (
+ process_storage_error,
+ return_response_headers,
+ return_headers_and_deserialized)
+from .._generated import VERSION
+from .._generated.aio import AzureBlobStorage
+from .._generated.models import (
+ StorageErrorException,
+ SignedIdentifier)
+from .._deserialize import deserialize_container_properties
+from .._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions
+from .._container_client import ContainerClient as ContainerClientBase, _get_blob_name
+from .._models import ContainerProperties, BlobType, BlobProperties # pylint: disable=unused-import
+from ._list_blobs_helper import BlobPropertiesPaged, BlobPrefix
+from ._lease_async import BlobLeaseClient
+from ._blob_client_async import BlobClient
+
+if TYPE_CHECKING:
+ from .._models import PublicAccess
+ from ._download_async import StorageStreamDownloader
+ from datetime import datetime
+ from .._models import ( # pylint: disable=unused-import
+ AccessPolicy,
+ StandardBlobTier,
+ PremiumPageBlobTier)
+
+
+class ContainerClient(AsyncStorageAccountHostsMixin, ContainerClientBase):
+ """A client to interact with a specific container, although that container
+ may not yet exist.
+
+ For operations relating to a specific blob within this container, a blob client can be
+ retrieved using the :func:`~get_blob_client` function.
+
+ :param str account_url:
+ The URI to the storage account. In order to create a client given the full URI to the container,
+ use the :func:`from_container_url` classmethod.
+ :param container_name:
+ The name of the container for the blob.
+ :type container_name: str
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the URL already has a SAS token, specifying an explicit credential will take priority.
+ :keyword str api_version:
+ The Storage API version to use for requests. Default value is '2019-07-07'.
+ Setting to an older version may result in reduced feature compatibility.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str secondary_hostname:
+ The hostname of the secondary endpoint.
+ :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+ Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+ uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+ the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+ :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+ algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+ :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+ :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
+        any part exceeding this size will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
+ :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+ or 4MB.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers_async.py
+ :start-after: [START create_container_client_from_service]
+ :end-before: [END create_container_client_from_service]
+ :language: python
+ :dedent: 8
+ :caption: Get a ContainerClient from an existing BlobServiceClient.
+
+ .. literalinclude:: ../samples/blob_samples_containers_async.py
+ :start-after: [START create_container_client_sasurl]
+ :end-before: [END create_container_client_sasurl]
+ :language: python
+ :dedent: 12
+ :caption: Creating the container client directly.
+ """
+ def __init__(
+ self, account_url, # type: str
+ container_name, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+ super(ContainerClient, self).__init__(
+ account_url,
+ container_name=container_name,
+ credential=credential,
+ **kwargs)
+ self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline)
+ self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access
+ self._loop = kwargs.get('loop', None)
+
+ @distributed_trace_async
+ async def create_container(self, metadata=None, public_access=None, **kwargs):
+ # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None
+ """
+ Creates a new container under the specified account. If the container
+ with the same name already exists, the operation fails.
+
+ :param metadata:
+            A dict with name-value pairs to associate with the
+            container as metadata. Example: {'Category':'test'}
+ :type metadata: dict[str, str]
+ :param ~azure.storage.blob.PublicAccess public_access:
+ Possible values include: 'container', 'blob'.
+ :keyword container_encryption_scope:
+ Specifies the default encryption scope to set on the container and use for
+ all future writes.
+
+ .. versionadded:: 12.2.0
+
+ :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers_async.py
+ :start-after: [START create_container]
+ :end-before: [END create_container]
+ :language: python
+ :dedent: 16
+ :caption: Creating a container to store blobs.
+ """
+ headers = kwargs.pop('headers', {})
+ headers.update(add_metadata_headers(metadata)) # type: ignore
+ timeout = kwargs.pop('timeout', None)
+ container_cpk_scope_info = get_container_cpk_scope_info(kwargs)
+ try:
+ return await self._client.container.create( # type: ignore
+ timeout=timeout,
+ access=public_access,
+ container_cpk_scope_info=container_cpk_scope_info,
+ cls=return_response_headers,
+ headers=headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
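+    # Hedged usage sketch (not part of the vendored SDK): creating the container with
+    # metadata and blob-level public read access. The connection string, container
+    # name and metadata are illustrative assumptions.
+    #
+    #     container = ContainerClient.from_connection_string(conn_str, "myfilesystem")
+    #     await container.create_container(metadata={"Category": "test"}, public_access="blob")
+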
+ @distributed_trace_async
+ async def delete_container(
+ self, **kwargs):
+ # type: (Any) -> None
+ """
+ Marks the specified container for deletion. The container and any blobs
+ contained within it are later deleted during garbage collection.
+
+ :keyword lease:
+ If specified, delete_container only succeeds if the
+ container's lease is active and matches this ID.
+ Required if the container has an active lease.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers_async.py
+ :start-after: [START delete_container]
+ :end-before: [END delete_container]
+ :language: python
+ :dedent: 16
+ :caption: Delete a container.
+ """
+ lease = kwargs.pop('lease', None)
+ access_conditions = get_access_conditions(lease)
+ mod_conditions = get_modify_conditions(kwargs)
+ timeout = kwargs.pop('timeout', None)
+ try:
+ await self._client.container.delete(
+ timeout=timeout,
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def acquire_lease(
+ self, lease_duration=-1, # type: int
+ lease_id=None, # type: Optional[str]
+ **kwargs):
+ # type: (...) -> BlobLeaseClient
+ """
+ Requests a new lease. If the container does not have an active lease,
+ the Blob service creates a lease on the container and returns a new
+ lease ID.
+
+ :param int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires. A non-infinite lease can be
+ between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change. Default is -1 (infinite lease).
+ :param str lease_id:
+ Proposed lease ID, in a GUID string format. The Blob service returns
+ 400 (Invalid request) if the proposed lease ID is not in the correct format.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A BlobLeaseClient object, that can be run in a context manager.
+ :rtype: ~azure.storage.blob.aio.BlobLeaseClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers_async.py
+ :start-after: [START acquire_lease_on_container]
+ :end-before: [END acquire_lease_on_container]
+ :language: python
+ :dedent: 12
+ :caption: Acquiring a lease on the container.
+ """
+ lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore
+ kwargs.setdefault('merge_span', True)
+ timeout = kwargs.pop('timeout', None)
+ await lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs)
+ return lease
+
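+    # Hedged usage sketch (not part of the vendored SDK): holding a 15-second lease on
+    # the container while updating its metadata, then releasing it.
+    #
+    #     lease = await container.acquire_lease(lease_duration=15)
+    #     try:
+    #         await container.set_container_metadata({"owner": "worker-1"}, lease=lease)
+    #     finally:
+    #         await lease.release()
+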
+ @distributed_trace_async
+ async def get_account_information(self, **kwargs):
+ # type: (**Any) -> Dict[str, str]
+ """Gets information related to the storage account.
+
+ The information can also be retrieved if the user has a SAS to a container or blob.
+ The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+ :returns: A dict of account information (SKU and account type).
+ :rtype: dict(str, str)
+ """
+ try:
+ return await self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_container_properties(self, **kwargs):
+ # type: (**Any) -> ContainerProperties
+ """Returns all user-defined metadata and system properties for the specified
+ container. The data returned does not include the container's list of blobs.
+
+ :keyword lease:
+ If specified, get_container_properties only succeeds if the
+ container's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Properties for the specified container within a container object.
+ :rtype: ~azure.storage.blob.ContainerProperties
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers_async.py
+ :start-after: [START get_container_properties]
+ :end-before: [END get_container_properties]
+ :language: python
+ :dedent: 16
+ :caption: Getting properties on the container.
+ """
+ lease = kwargs.pop('lease', None)
+ access_conditions = get_access_conditions(lease)
+ timeout = kwargs.pop('timeout', None)
+ try:
+ response = await self._client.container.get_properties(
+ timeout=timeout,
+ lease_access_conditions=access_conditions,
+ cls=deserialize_container_properties,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ response.name = self.container_name
+ return response # type: ignore
+
+ @distributed_trace_async
+ async def set_container_metadata( # type: ignore
+ self, metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets one or more user-defined name-value pairs for the specified
+ container. Each call to this operation replaces all existing metadata
+ attached to the container. To remove all metadata from the container,
+ call this operation with no metadata dict.
+
+ :param metadata:
+ A dict containing name-value pairs to associate with the container as
+ metadata. Example: {'category':'test'}
+ :type metadata: dict[str, str]
+ :keyword lease:
+ If specified, set_container_metadata only succeeds if the
+ container's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Container-updated property dict (Etag and last modified).
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers_async.py
+ :start-after: [START set_container_metadata]
+ :end-before: [END set_container_metadata]
+ :language: python
+ :dedent: 16
+ :caption: Setting metadata on the container.
+ """
+ headers = kwargs.pop('headers', {})
+ headers.update(add_metadata_headers(metadata))
+ lease = kwargs.pop('lease', None)
+ access_conditions = get_access_conditions(lease)
+ mod_conditions = get_modify_conditions(kwargs)
+ timeout = kwargs.pop('timeout', None)
+ try:
+ return await self._client.container.set_metadata( # type: ignore
+ timeout=timeout,
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ headers=headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_container_access_policy(self, **kwargs):
+ # type: (Any) -> Dict[str, Any]
+ """Gets the permissions for the specified container.
+ The permissions indicate whether container data may be accessed publicly.
+
+ :keyword lease:
+ If specified, get_container_access_policy only succeeds if the
+ container's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Access policy information in a dict.
+ :rtype: dict[str, Any]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers_async.py
+ :start-after: [START get_container_access_policy]
+ :end-before: [END get_container_access_policy]
+ :language: python
+ :dedent: 16
+ :caption: Getting the access policy on the container.
+ """
+ lease = kwargs.pop('lease', None)
+ access_conditions = get_access_conditions(lease)
+ timeout = kwargs.pop('timeout', None)
+ try:
+ response, identifiers = await self._client.container.get_access_policy(
+ timeout=timeout,
+ lease_access_conditions=access_conditions,
+ cls=return_headers_and_deserialized,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return {
+ 'public_access': response.get('blob_public_access'),
+ 'signed_identifiers': identifiers or []
+ }
+
+ @distributed_trace_async
+ async def set_container_access_policy(
+ self, signed_identifiers, # type: Dict[str, AccessPolicy]
+ public_access=None, # type: Optional[Union[str, PublicAccess]]
+ **kwargs # type: Any
+ ): # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets the permissions for the specified container or stored access
+ policies that may be used with Shared Access Signatures. The permissions
+ indicate whether blobs in a container may be accessed publicly.
+
+ :param signed_identifiers:
+ A dictionary of access policies to associate with the container. The
+ dictionary may contain up to 5 elements. An empty dictionary
+ will clear the access policies set on the service.
+ :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy]
+ :param ~azure.storage.blob.PublicAccess public_access:
+ Possible values include: 'container', 'blob'.
+ :keyword lease:
+ Required if the container has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A datetime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified date/time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A datetime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Container-updated property dict (Etag and last modified).
+ :rtype: dict[str, str or ~datetime.datetime]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers_async.py
+ :start-after: [START set_container_access_policy]
+ :end-before: [END set_container_access_policy]
+ :language: python
+ :dedent: 16
+ :caption: Setting access policy on the container.
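+
+ A minimal usage sketch (assumes ``AccessPolicy`` and ``ContainerSasPermissions`` from the
+ equivalent public ``azure.storage.blob`` package; the policy name is a placeholder)::
+
+     from datetime import datetime, timedelta
+     from azure.storage.blob import AccessPolicy, ContainerSasPermissions
+
+     async def set_policy_sample(container):
+         # Up to 5 stored access policies may be set on a single container.
+         policy = AccessPolicy(
+             permission=ContainerSasPermissions(read=True),
+             start=datetime.utcnow(),
+             expiry=datetime.utcnow() + timedelta(hours=1))
+         await container.set_container_access_policy(
+             signed_identifiers={"read-only": policy}, public_access="container")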
+ """
+ timeout = kwargs.pop('timeout', None)
+ lease = kwargs.pop('lease', None)
+ if len(signed_identifiers) > 5:
+ raise ValueError(
+ 'Too many access policies provided. The server does not support setting '
+ 'more than 5 access policies on a single resource.')
+ identifiers = []
+ for key, value in signed_identifiers.items():
+ if value:
+ value.start = serialize_iso(value.start)
+ value.expiry = serialize_iso(value.expiry)
+ identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore
+ signed_identifiers = identifiers # type: ignore
+
+ mod_conditions = get_modify_conditions(kwargs)
+ access_conditions = get_access_conditions(lease)
+ try:
+ return await self._client.container.set_access_policy(
+ container_acl=signed_identifiers or None,
+ timeout=timeout,
+ access=public_access,
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ @distributed_trace
+ def list_blobs(self, name_starts_with=None, include=None, **kwargs):
+ # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> AsyncItemPaged[BlobProperties]
+ """Returns a generator to list the blobs under the specified container.
+ The generator will lazily follow the continuation tokens returned by
+ the service.
+
+ :param str name_starts_with:
+ Filters the results to return only blobs whose names
+ begin with the specified prefix.
+ :param list[str] or str include:
+ Specifies one or more additional datasets to include in the response.
+ Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'tags'.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An iterable (auto-paging) response of BlobProperties.
+ :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers_async.py
+ :start-after: [START list_blobs_in_container]
+ :end-before: [END list_blobs_in_container]
+ :language: python
+ :dedent: 12
+ :caption: List the blobs in the container.
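+
+ A minimal usage sketch (``container`` is assumed to be an already-constructed
+ ``azure.storage.blob.aio.ContainerClient``; the prefix is a placeholder)::
+
+     async def list_sample(container):
+         # Lazily pages through results; request blob metadata alongside each entry.
+         async for blob in container.list_blobs(name_starts_with="logs/", include=["metadata"]):
+             print(blob.name, blob.size)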
+ """
+ if include and not isinstance(include, list):
+ include = [include]
+
+ results_per_page = kwargs.pop('results_per_page', None)
+ timeout = kwargs.pop('timeout', None)
+ command = functools.partial(
+ self._client.container.list_blob_flat_segment,
+ include=include,
+ timeout=timeout,
+ **kwargs)
+ return AsyncItemPaged(
+ command,
+ prefix=name_starts_with,
+ results_per_page=results_per_page,
+ page_iterator_class=BlobPropertiesPaged
+ )
+
+ @distributed_trace
+ def walk_blobs(
+ self, name_starts_with=None, # type: Optional[str]
+ include=None, # type: Optional[Any]
+ delimiter="/", # type: str
+ **kwargs # type: Optional[Any]
+ ):
+ # type: (...) -> AsyncItemPaged[BlobProperties]
+ """Returns a generator to list the blobs under the specified container.
+ The generator will lazily follow the continuation tokens returned by
+ the service. This operation will list blobs in accordance with a hierarchy,
+ as delimited by the specified delimiter character.
+
+ :param str name_starts_with:
+ Filters the results to return only blobs whose names
+ begin with the specified prefix.
+ :param list[str] include:
+ Specifies one or more additional datasets to include in the response.
+ Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'.
+ :param str delimiter:
+ When the request includes this parameter, the operation returns a BlobPrefix
+ element in the response body that acts as a placeholder for all blobs whose
+ names begin with the same substring up to the appearance of the delimiter
+ character. The delimiter may be a single character or a string.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An iterable (auto-paging) response of BlobProperties.
+ :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties]
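+
+ A minimal usage sketch of hierarchical listing (``container`` is assumed to be an
+ ``azure.storage.blob.aio.ContainerClient``)::
+
+     async def walk_sample(container):
+         async for item in container.walk_blobs(delimiter="/"):
+             # Items are BlobProperties for blobs, or BlobPrefix for virtual directories.
+             print(item.name)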
+ """
+ if include and not isinstance(include, list):
+ include = [include]
+
+ results_per_page = kwargs.pop('results_per_page', None)
+ timeout = kwargs.pop('timeout', None)
+ command = functools.partial(
+ self._client.container.list_blob_hierarchy_segment,
+ delimiter=delimiter,
+ include=include,
+ timeout=timeout,
+ **kwargs)
+ return BlobPrefix(
+ command,
+ prefix=name_starts_with,
+ results_per_page=results_per_page,
+ delimiter=delimiter)
+
+ @distributed_trace_async
+ async def upload_blob(
+ self, name, # type: Union[str, BlobProperties]
+ data, # type: Union[Iterable[AnyStr], IO[AnyStr]]
+ blob_type=BlobType.BlockBlob, # type: Union[str, BlobType]
+ length=None, # type: Optional[int]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs
+ ):
+ # type: (...) -> BlobClient
+ """Creates a new blob from a data source with automatic chunking.
+
+ :param name: The blob with which to interact. If specified, this value will override
+ a blob value specified in the blob URL.
+ :type name: str or ~azure.storage.blob.BlobProperties
+ :param data: The blob data to upload.
+ :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
+ either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
+ :param int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict(str, str)
+ :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+ If True, upload_blob will overwrite the existing data. If set to False, the
+ operation will fail with ResourceExistsError. The exception to the above is with Append
+ blob types: if set to False and the data already exists, an error will not be raised
+ and the data will be appended to the existing blob. If overwrite is set to True, then the existing
+ append blob will be deleted, and a new one created. Defaults to False.
+ :keyword ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the blob. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https, as https (the default) will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient upload algorithm
+ will not be used, because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword lease:
+ Required if the container has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+ :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+ A standard blob tier value to set the blob to. For this version of the library,
+ this is only applicable to block blobs on standard storage accounts.
+ :keyword int maxsize_condition:
+ Optional conditional header. The max length in bytes permitted for
+ the append blob. If the Append Block operation would cause the blob
+ to exceed that limit or if the blob size is already greater than the
+ value specified in this header, the request will fail with
+ MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+ :keyword int max_concurrency:
+ Maximum number of parallel connections to use when the blob size exceeds
+ 64MB.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str encoding:
+ Defaults to UTF-8.
+ :returns: A BlobClient to interact with the newly uploaded blob.
+ :rtype: ~azure.storage.blob.aio.BlobClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers_async.py
+ :start-after: [START upload_blob_to_container]
+ :end-before: [END upload_blob_to_container]
+ :language: python
+ :dedent: 12
+ :caption: Upload blob to the container.
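+
+ A minimal usage sketch (placeholder file and blob names; ``container`` is an aio
+ ``ContainerClient``)::
+
+     async def upload_sample(container):
+         with open("./data.bin", "rb") as source:
+             # Returns a BlobClient pointed at the newly uploaded blob.
+             blob_client = await container.upload_blob(
+                 name="data.bin", data=source, overwrite=True)
+         return blob_client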
+ """
+ blob = self.get_blob_client(name)
+ kwargs.setdefault('merge_span', True)
+ timeout = kwargs.pop('timeout', None)
+ encoding = kwargs.pop('encoding', 'UTF-8')
+ await blob.upload_blob(
+ data,
+ blob_type=blob_type,
+ length=length,
+ metadata=metadata,
+ timeout=timeout,
+ encoding=encoding,
+ **kwargs
+ )
+ return blob
+
+ @distributed_trace_async
+ async def delete_blob(
+ self, blob, # type: Union[str, BlobProperties]
+ delete_snapshots=None, # type: Optional[str]
+ **kwargs
+ ):
+ # type: (...) -> None
+ """Marks the specified blob or snapshot for deletion.
+
+ The blob is later deleted during garbage collection.
+ Note that in order to delete a blob, you must delete all of its
+ snapshots. You can delete both at the same time with the delete_blob
+ operation.
+
+ If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot
+ and retains the blob or snapshot for the specified number of days.
+ After the specified number of days, the blob's data is removed from the service during garbage collection.
+ A soft-deleted blob or snapshot is accessible through :func:`list_blobs()` by specifying the `include=["deleted"]`
+ option. A soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()`.
+
+ :param blob: The blob with which to interact. If specified, this value will override
+ a blob value specified in the blob URL.
+ :type blob: str or ~azure.storage.blob.BlobProperties
+ :param str delete_snapshots:
+ Required if the blob has associated snapshots. Values include:
+ - "only": Deletes only the blobs snapshots.
+ - "include": Deletes the blob along with all snapshots.
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to delete.
+
+ .. versionadded:: 12.4.0
+ This keyword argument was introduced in API version '2019-12-12'.
+
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a Lease object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
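+
+ A minimal usage sketch (placeholder blob name; ``container`` is an aio ``ContainerClient``)::
+
+     async def delete_sample(container):
+         # Remove the blob together with any snapshots it has.
+         await container.delete_blob("data.bin", delete_snapshots="include")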
+ """
+ blob = self.get_blob_client(blob) # type: ignore
+ kwargs.setdefault('merge_span', True)
+ timeout = kwargs.pop('timeout', None)
+ await blob.delete_blob( # type: ignore
+ delete_snapshots=delete_snapshots,
+ timeout=timeout,
+ **kwargs)
+
+ @distributed_trace_async
+ async def download_blob(self, blob, offset=None, length=None, **kwargs):
+ # type: (Union[str, BlobProperties], Optional[int], Optional[int], Any) -> StorageStreamDownloader
+ """Downloads a blob to the StorageStreamDownloader. The readall() method must
+ be used to read all the content or readinto() must be used to download the blob into
+ a stream.
+
+ :param blob: The blob with which to interact. If specified, this value will override
+ a blob value specified in the blob URL.
+ :type blob: str or ~azure.storage.blob.BlobProperties
+ :param int offset:
+ Start of byte range to use for downloading a section of the blob.
+ Must be set if length is provided.
+ :param int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the blob. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https, as https (the default) will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient upload algorithm
+ will not be used because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword lease:
+ Required if the blob has an active lease. If specified, download_blob only
+ succeeds if the blob's lease is active and matches this ID. Value can be a
+ BlobLeaseClient object or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :keyword str encoding:
+ Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :returns: A streaming object (StorageStreamDownloader).
+ :rtype: ~azure.storage.blob.aio.StorageStreamDownloader
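+
+ A minimal usage sketch (placeholder blob name; ``container`` is an aio ``ContainerClient``)::
+
+     async def download_sample(container):
+         # download_blob returns a StorageStreamDownloader; readall() pulls the full content.
+         downloader = await container.download_blob("data.bin")
+         data = await downloader.readall()
+         return data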
+ """
+ blob_client = self.get_blob_client(blob) # type: ignore
+ kwargs.setdefault('merge_span', True)
+ return await blob_client.download_blob(
+ offset=offset,
+ length=length,
+ **kwargs)
+
+ @distributed_trace_async
+ async def delete_blobs( # pylint: disable=arguments-differ
+ self, *blobs: List[Union[str, BlobProperties, dict]],
+ **kwargs
+ ) -> AsyncIterator[AsyncHttpResponse]:
+ """Marks the specified blobs or snapshots for deletion.
+
+ The blobs are later deleted during garbage collection.
+ Note that in order to delete blobs, you must delete all of their
+ snapshots. You can delete both at the same time with the delete_blobs operation.
+
+ If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots
+ and retains the blobs or snapshots for the specified number of days.
+ After the specified number of days, the blobs' data is removed from the service during garbage collection.
+ Soft-deleted blobs or snapshots are accessible through :func:`list_blobs()` by specifying `include=["deleted"]`.
+ Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()`.
+
+ :param blobs:
+ The blobs to delete. This can be a single blob, or multiple values can
+ be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+ .. note::
+ When a blob is passed as a dict, the following keys and value rules apply.
+
+ blob name:
+ key: 'name', value type: str
+ snapshot you want to delete:
+ key: 'snapshot', value type: str
+ whether to delete snapshots when deleting the blob:
+ key: 'delete_snapshots', value: 'include' or 'only'
+ if the blob modified or not:
+ key: 'if_modified_since', 'if_unmodified_since', value type: datetime
+ etag:
+ key: 'etag', value type: str
+ match the etag or not:
+ key: 'match_condition', value type: MatchConditions
+ tags match condition:
+ key: 'if_tags_match_condition', value type: str
+ lease:
+ key: 'lease_id', value type: Union[str, LeaseClient]
+ timeout for subrequest:
+ key: 'timeout', value type: int
+
+ :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+ :keyword str delete_snapshots:
+ Required if a blob has associated snapshots. Values include:
+ - "only": Deletes only the blobs snapshots.
+ - "include": Deletes the blob along with all snapshots.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword bool raise_on_any_failure:
+ This is a boolean param which defaults to True. When this is set, an exception
+ is raised even if there is a single operation failure. For optimal performance,
+ this should be set to False.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: An async iterator of responses, one for each blob in order
+ :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common_async.py
+ :start-after: [START delete_multiple_blobs]
+ :end-before: [END delete_multiple_blobs]
+ :language: python
+ :dedent: 12
+ :caption: Deleting multiple blobs.
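+
+ A minimal usage sketch of a batched delete (placeholder blob names; sub-responses are
+ iterated asynchronously)::
+
+     async def batch_delete_sample(container):
+         responses = await container.delete_blobs(
+             "logs/2021-01.txt", "logs/2021-02.txt", raise_on_any_failure=False)
+         async for response in responses:
+             # Each item is the raw sub-response for one blob, in order.
+             print(response.status_code)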
+ """
+ if len(blobs) == 0:
+ return iter(list())
+
+ reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs)
+
+ return await self._batch_send(*reqs, **options)
+
+ @distributed_trace
+ async def set_standard_blob_tier_blobs(
+ self,
+ standard_blob_tier: Union[str, 'StandardBlobTier'],
+ *blobs: List[Union[str, BlobProperties, dict]],
+ **kwargs
+ ) -> AsyncIterator[AsyncHttpResponse]:
+ """This operation sets the tier on block blobs.
+
+ A block blob's tier determines Hot/Cool/Archive storage type.
+ This operation does not update the blob's ETag.
+
+ :param standard_blob_tier:
+ Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool',
+ 'Archive'. The hot tier is optimized for storing data that is accessed
+ frequently. The cool storage tier is optimized for storing data that
+ is infrequently accessed and stored for at least a month. The archive
+ tier is optimized for storing data that is rarely accessed and stored
+ for at least six months with flexible latency requirements.
+
+ .. note::
+ If you want to set a different tier on different blobs, please set this positional parameter to None.
+ The blob tier on each BlobProperties will then be used.
+
+ :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
+ :param blobs:
+ The blobs with which to interact. This can be a single blob, or multiple values can
+ be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+ .. note::
+ When a blob is passed as a dict, the following keys and value rules apply.
+ blob name:
+ key: 'name', value type: str
+ standard blob tier:
+ key: 'blob_tier', value type: StandardBlobTier
+ rehydrate priority:
+ key: 'rehydrate_priority', value type: RehydratePriority
+ lease:
+ key: 'lease_id', value type: Union[str, LeaseClient]
+ tags match condition:
+ key: 'if_tags_match_condition', value type: str
+ timeout for subrequest:
+ key: 'timeout', value type: int
+
+ :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+ :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+ Indicates the priority with which to rehydrate an archived blob
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword bool raise_on_any_failure:
+ This is a boolean param which defaults to True. When this is set, an exception
+ is raised even if there is a single operation failure. For optimal performance,
+ this should be set to False.
+ :return: An async iterator of responses, one for each blob in order
+ :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse]
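+
+ A minimal usage sketch (assumes ``StandardBlobTier`` from the equivalent public
+ ``azure.storage.blob`` package; blob names are placeholders)::
+
+     from azure.storage.blob import StandardBlobTier
+
+     async def tier_sample(container):
+         # Move two blobs to the Cool tier in a single batch request.
+         await container.set_standard_blob_tier_blobs(
+             StandardBlobTier.Cool, "logs/2021-01.txt", "logs/2021-02.txt")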
+ """
+ reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs)
+
+ return await self._batch_send(*reqs, **options)
+
+ @distributed_trace
+ async def set_premium_page_blob_tier_blobs(
+ self,
+ premium_page_blob_tier: Union[str, 'PremiumPageBlobTier'],
+ *blobs: List[Union[str, BlobProperties, dict]],
+ **kwargs
+ ) -> AsyncIterator[AsyncHttpResponse]:
+ """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts.
+
+ :param premium_page_blob_tier:
+ A page blob tier value to set on all blobs. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+
+ .. note::
+ If you want to set a different tier on different blobs, please set this positional parameter to None.
+ The blob tier on each BlobProperties will then be used.
+
+ :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+ :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can
+ be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+ .. note::
+ When a blob is passed as a dict, the following keys and value rules apply.
+
+ blob name:
+ key: 'name', value type: str
+ premium blob tier:
+ key: 'blob_tier', value type: PremiumPageBlobTier
+ lease:
+ key: 'lease_id', value type: Union[str, LeaseClient]
+ timeout for subrequest:
+ key: 'timeout', value type: int
+
+ :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :keyword bool raise_on_any_failure:
+ This is a boolean param which defaults to True. When this is set, an exception
+ is raised even if there is a single operation failure. For optimal performance,
+ this should be set to False.
+ :return: An async iterator of responses, one for each blob in order
+ :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse]
+ """
+ reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs)
+
+ return await self._batch_send(*reqs, **options)
+
+ def get_blob_client(
+ self, blob, # type: Union[BlobProperties, str]
+ snapshot=None # type: str
+ ):
+ # type: (...) -> BlobClient
+ """Get a client to interact with the specified blob.
+
+ The blob need not already exist.
+
+ :param blob:
+ The blob with which to interact.
+ :type blob: str or ~azure.storage.blob.BlobProperties
+ :param str snapshot:
+ The optional blob snapshot on which to operate. This can be the snapshot ID string
+ or the response returned from :func:`~BlobClient.create_snapshot()`.
+ :returns: A BlobClient.
+ :rtype: ~azure.storage.blob.aio.BlobClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers_async.py
+ :start-after: [START get_blob_client]
+ :end-before: [END get_blob_client]
+ :language: python
+ :dedent: 12
+ :caption: Get the blob client.
+ """
+ blob_name = _get_blob_name(blob)
+ _pipeline = AsyncPipeline(
+ transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return BlobClient(
+ self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot,
+ credential=self.credential, api_version=self.api_version, _configuration=self._config,
+ _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+ require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function, loop=self._loop)
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_download_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_download_async.py
new file mode 100644
index 00000000000..44ba51d272d
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_download_async.py
@@ -0,0 +1,502 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+import asyncio
+import sys
+from io import BytesIO
+from itertools import islice
+import warnings
+
+from azure.core.exceptions import HttpResponseError
+from .._shared.encryption import decrypt_blob
+from .._shared.request_handlers import validate_and_format_range_headers
+from .._shared.response_handlers import process_storage_error, parse_length_from_content_range
+from .._deserialize import get_page_ranges_result
+from .._download import process_range_and_offset, _ChunkDownloader
+
+async def process_content(data, start_offset, end_offset, encryption):
+ if data is None:
+ raise ValueError("Response cannot be None.")
+ try:
+ content = data.response.body()
+ except Exception as error:
+ raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error)
+ if encryption.get('key') is not None or encryption.get('resolver') is not None:
+ try:
+ return decrypt_blob(
+ encryption.get('required'),
+ encryption.get('key'),
+ encryption.get('resolver'),
+ content,
+ start_offset,
+ end_offset,
+ data.response.headers)
+ except Exception as error:
+ raise HttpResponseError(
+ message="Decryption failed.",
+ response=data.response,
+ error=error)
+ return content
+
+
+class _AsyncChunkDownloader(_ChunkDownloader):
+ def __init__(self, **kwargs):
+ super(_AsyncChunkDownloader, self).__init__(**kwargs)
+ self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None
+ self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None
+
+ async def process_chunk(self, chunk_start):
+ chunk_start, chunk_end = self._calculate_range(chunk_start)
+ chunk_data = await self._download_chunk(chunk_start, chunk_end - 1)
+ length = chunk_end - chunk_start
+ if length > 0:
+ await self._write_to_stream(chunk_data, chunk_start)
+ await self._update_progress(length)
+
+ async def yield_chunk(self, chunk_start):
+ chunk_start, chunk_end = self._calculate_range(chunk_start)
+ return await self._download_chunk(chunk_start, chunk_end - 1)
+
+ async def _update_progress(self, length):
+ if self.progress_lock:
+ async with self.progress_lock: # pylint: disable=not-async-context-manager
+ self.progress_total += length
+ else:
+ self.progress_total += length
+
+ async def _write_to_stream(self, chunk_data, chunk_start):
+ if self.stream_lock:
+ async with self.stream_lock: # pylint: disable=not-async-context-manager
+ self.stream.seek(self.stream_start + (chunk_start - self.start_index))
+ self.stream.write(chunk_data)
+ else:
+ self.stream.write(chunk_data)
+
+ async def _download_chunk(self, chunk_start, chunk_end):
+ download_range, offset = process_range_and_offset(
+ chunk_start, chunk_end, chunk_end, self.encryption_options)
+
+ # No need to download an empty chunk from the server if there's no data in the chunk to be downloaded.
+ # Instead, optimize by creating the empty chunk locally when this condition is met.
+ if self._do_optimize(download_range[0], download_range[1]):
+ chunk_data = b"\x00" * self.chunk_size
+ else:
+ range_header, range_validation = validate_and_format_range_headers(
+ download_range[0],
+ download_range[1],
+ check_content_md5=self.validate_content
+ )
+ try:
+ _, response = await self.client.download(
+ range=range_header,
+ range_get_content_md5=range_validation,
+ validate_content=self.validate_content,
+ data_stream_total=self.total_size,
+ download_stream_current=self.progress_total,
+ **self.request_options
+ )
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options)
+
+ # This makes sure that if_match is set so that we can validate
+ # that subsequent downloads are to an unmodified blob
+ if self.request_options.get('modified_access_conditions'):
+ self.request_options['modified_access_conditions'].if_match = response.properties.etag
+
+ return chunk_data
+
+
+class _AsyncChunkIterator(object):
+ """Async iterator for chunks in blob download stream."""
+
+ def __init__(self, size, content, downloader):
+ self.size = size
+ self._current_content = content
+ self._iter_downloader = downloader
+ self._iter_chunks = None
+ self._complete = (size == 0)
+
+ def __len__(self):
+ return self.size
+
+ def __iter__(self):
+ raise TypeError("Async stream must be iterated asynchronously.")
+
+ def __aiter__(self):
+ return self
+
+ async def __anext__(self):
+ """Iterate through responses."""
+ if self._complete:
+ raise StopAsyncIteration("Download complete")
+ if not self._iter_downloader:
+ # If no iterator was supplied, the download completed with
+ # the initial GET, so we just return that data
+ self._complete = True
+ return self._current_content
+
+ if not self._iter_chunks:
+ self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+ else:
+ try:
+ chunk = next(self._iter_chunks)
+ except StopIteration:
+ raise StopAsyncIteration("Download complete")
+ self._current_content = await self._iter_downloader.yield_chunk(chunk)
+
+ return self._current_content
+
+
+class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes
+ """A streaming object to download from Azure Storage.
+
+ :ivar str name:
+ The name of the blob being downloaded.
+ :ivar str container:
+ The name of the container where the blob is.
+ :ivar ~azure.storage.blob.BlobProperties properties:
+ The properties of the blob being downloaded. If only a range of the data is being
+ downloaded, this will be reflected in the properties.
+ :ivar int size:
+ The size of the total data in the stream. This will be the byte range if specified,
+ otherwise the total size of the blob.
+ """
+
+ def __init__(
+ self,
+ clients=None,
+ config=None,
+ start_range=None,
+ end_range=None,
+ validate_content=None,
+ encryption_options=None,
+ max_concurrency=1,
+ name=None,
+ container=None,
+ encoding=None,
+ **kwargs
+ ):
+ self.name = name
+ self.container = container
+ self.properties = None
+ self.size = None
+
+ self._clients = clients
+ self._config = config
+ self._start_range = start_range
+ self._end_range = end_range
+ self._max_concurrency = max_concurrency
+ self._encoding = encoding
+ self._validate_content = validate_content
+ self._encryption_options = encryption_options or {}
+ self._request_options = kwargs
+ self._location_mode = None
+ self._download_complete = False
+ self._current_content = None
+ self._file_size = None
+ self._non_empty_ranges = None
+ self._response = None
+
+ # The service only provides transactional MD5s for chunks under 4MB.
+ # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
+ # chunk so a transactional MD5 can be retrieved.
+ self._first_get_size = self._config.max_single_get_size if not self._validate_content \
+ else self._config.max_chunk_get_size
+ initial_request_start = self._start_range if self._start_range is not None else 0
+ if self._end_range is not None and self._end_range - self._start_range < self._first_get_size:
+ initial_request_end = self._end_range
+ else:
+ initial_request_end = initial_request_start + self._first_get_size - 1
+
+ self._initial_range, self._initial_offset = process_range_and_offset(
+ initial_request_start, initial_request_end, self._end_range, self._encryption_options
+ )
+
+ def __len__(self):
+ return self.size
+
+ async def _setup(self):
+ self._response = await self._initial_request()
+ self.properties = self._response.properties
+ self.properties.name = self.name
+ self.properties.container = self.container
+
+ # Set the content length to the download size instead of the size of
+ # the last range
+ self.properties.size = self.size
+
+ # Overwrite the content range to the user requested range
+ self.properties.content_range = 'bytes {0}-{1}/{2}'.format(
+ self._start_range,
+ self._end_range,
+ self._file_size
+ )
+
+ # Overwrite the content MD5 as it is the MD5 for the last range instead
+ # of the stored MD5
+ # TODO: Set to the stored MD5 when the service returns this
+ self.properties.content_md5 = None
+
+ if self.size == 0:
+ self._current_content = b""
+ else:
+ self._current_content = await process_content(
+ self._response,
+ self._initial_offset[0],
+ self._initial_offset[1],
+ self._encryption_options
+ )
+
+ async def _initial_request(self):
+ range_header, range_validation = validate_and_format_range_headers(
+ self._initial_range[0],
+ self._initial_range[1],
+ start_range_required=False,
+ end_range_required=False,
+ check_content_md5=self._validate_content)
+
+ try:
+ location_mode, response = await self._clients.blob.download(
+ range=range_header,
+ range_get_content_md5=range_validation,
+ validate_content=self._validate_content,
+ data_stream_total=None,
+ download_stream_current=0,
+ **self._request_options)
+
+ # Check the location we read from to ensure we use the same one
+ # for subsequent requests.
+ self._location_mode = location_mode
+
+ # Parse the total file size and adjust the download size if ranges
+ # were specified
+ self._file_size = parse_length_from_content_range(response.properties.content_range)
+ if self._end_range is not None:
+ # Use the length unless it is over the end of the file
+ self.size = min(self._file_size, self._end_range - self._start_range + 1)
+ elif self._start_range is not None:
+ self.size = self._file_size - self._start_range
+ else:
+ self.size = self._file_size
+
+ except HttpResponseError as error:
+ if self._start_range is None and error.response.status_code == 416:
+ # Get range will fail on an empty file. If the user did not
+ # request a range, do a regular get request in order to get
+ # any properties.
+ try:
+ _, response = await self._clients.blob.download(
+ validate_content=self._validate_content,
+ data_stream_total=0,
+ download_stream_current=0,
+ **self._request_options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ # Set the download size to empty
+ self.size = 0
+ self._file_size = 0
+ else:
+ process_storage_error(error)
+
+ # get page ranges to optimize downloading sparse page blob
+ if response.properties.blob_type == 'PageBlob':
+ try:
+ page_ranges = await self._clients.page_blob.get_page_ranges()
+ self._non_empty_ranges = get_page_ranges_result(page_ranges)[0]
+ except HttpResponseError:
+ pass
+
+ # If the file is small, the download is complete at this point.
+ # If file size is large, download the rest of the file in chunks.
+ if response.properties.size != self.size:
+ # Lock on the etag. This can be overridden by the user by specifying '*'.
+ if self._request_options.get('modified_access_conditions'):
+ if not self._request_options['modified_access_conditions'].if_match:
+ self._request_options['modified_access_conditions'].if_match = response.properties.etag
+ else:
+ self._download_complete = True
+ return response
+
+ def chunks(self):
+ """Iterate over chunks in the download stream.
+
+ :rtype: AsyncIterator[bytes]
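+
+ A minimal usage sketch (``downloader`` is assumed to be the object returned by an
+ awaited ``download_blob`` call)::
+
+     async def stream_sample(downloader):
+         total = 0
+         async for chunk in downloader.chunks():
+             total += len(chunk)  # each chunk is a bytes object
+         return total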
+ """
+ if self.size == 0 or self._download_complete:
+ iter_downloader = None
+ else:
+ data_end = self._file_size
+ if self._end_range is not None:
+ # Use the length unless it is over the end of the file
+ data_end = min(self._file_size, self._end_range + 1)
+ iter_downloader = _AsyncChunkDownloader(
+ client=self._clients.blob,
+ non_empty_ranges=self._non_empty_ranges,
+ total_size=self.size,
+ chunk_size=self._config.max_chunk_get_size,
+ current_progress=self._first_get_size,
+ start_range=self._initial_range[1] + 1, # Start where the first download ended
+ end_range=data_end,
+ stream=None,
+ parallel=False,
+ validate_content=self._validate_content,
+ encryption_options=self._encryption_options,
+ use_location=self._location_mode,
+ **self._request_options)
+ return _AsyncChunkIterator(
+ size=self.size,
+ content=self._current_content,
+ downloader=iter_downloader)
+
+ async def readall(self):
+ """Download the contents of this blob.
+
+ This operation is blocking until all data is downloaded.
+ :rtype: bytes or str
+ """
+ stream = BytesIO()
+ await self.readinto(stream)
+ data = stream.getvalue()
+ if self._encoding:
+ return data.decode(self._encoding)
+ return data
+
+ async def content_as_bytes(self, max_concurrency=1):
+ """Download the contents of this file.
+
+ This operation is blocking until all data is downloaded.
+
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :rtype: bytes
+ """
+ warnings.warn(
+ "content_as_bytes is deprecated, use readall instead",
+ DeprecationWarning
+ )
+ self._max_concurrency = max_concurrency
+ return await self.readall()
+
+ async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+ """Download the contents of this blob, and decode as text.
+
+ This operation is blocking until all data is downloaded.
+
+ :param int max_concurrency:
+ The number of parallel connections with which to download.
+ :param str encoding:
+ Text encoding used to decode the downloaded bytes. Default is UTF-8.
+ :rtype: str
+ """
+ warnings.warn(
+ "content_as_text is deprecated, use readall instead",
+ DeprecationWarning
+ )
+ self._max_concurrency = max_concurrency
+ self._encoding = encoding
+ return await self.readall()
+
+ async def readinto(self, stream):
+ """Download the contents of this blob to a stream.
+
+ :param stream:
+ The stream to download to. This can be an open file-handle,
+ or any writable stream. The stream must be seekable if the download
+ uses more than one parallel connection.
+ :returns: The number of bytes read.
+ :rtype: int
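+
+ A minimal usage sketch (placeholder file path; the open file handle is seekable, so
+ parallel chunk downloads are allowed)::
+
+     async def readinto_sample(downloader):
+         with open("./download.bin", "wb") as handle:
+             bytes_read = await downloader.readinto(handle)
+         return bytes_read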
+ """
+ # the stream must be seekable if parallel download is required
+ parallel = self._max_concurrency > 1
+ if parallel:
+ error_message = "Target stream handle must be seekable."
+ if sys.version_info >= (3,) and not stream.seekable():
+ raise ValueError(error_message)
+
+ try:
+ stream.seek(stream.tell())
+ except (NotImplementedError, AttributeError):
+ raise ValueError(error_message)
+
+ # Write the content to the user stream
+ stream.write(self._current_content)
+ if self._download_complete:
+ return self.size
+
+ data_end = self._file_size
+ if self._end_range is not None:
+ # Use the length unless it is over the end of the file
+ data_end = min(self._file_size, self._end_range + 1)
+
+ downloader = _AsyncChunkDownloader(
+ client=self._clients.blob,
+ non_empty_ranges=self._non_empty_ranges,
+ total_size=self.size,
+ chunk_size=self._config.max_chunk_get_size,
+ current_progress=self._first_get_size,
+ start_range=self._initial_range[1] + 1, # start where the first download ended
+ end_range=data_end,
+ stream=stream,
+ parallel=parallel,
+ validate_content=self._validate_content,
+ encryption_options=self._encryption_options,
+ use_location=self._location_mode,
+ **self._request_options)
+
+ dl_tasks = downloader.get_chunk_offsets()
+ running_futures = [
+ asyncio.ensure_future(downloader.process_chunk(d))
+ for d in islice(dl_tasks, 0, self._max_concurrency)
+ ]
+ while running_futures:
+ # Wait for some download to finish before adding a new one
+ done, running_futures = await asyncio.wait(
+ running_futures, return_when=asyncio.FIRST_COMPLETED)
+ try:
+ for task in done:
+ task.result()
+ except HttpResponseError as error:
+ process_storage_error(error)
+ try:
+ next_chunk = next(dl_tasks)
+ except StopIteration:
+ break
+ else:
+ running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk)))
+
+ if running_futures:
+ # Wait for the remaining downloads to finish
+ done, _running_futures = await asyncio.wait(running_futures)
+ try:
+ for task in done:
+ task.result()
+ except HttpResponseError as error:
+ process_storage_error(error)
+ return self.size
+
+ async def download_to_stream(self, stream, max_concurrency=1):
+ """Download the contents of this blob to a stream.
+
+ :param stream:
+ The stream to download to. This can be an open file-handle,
+ or any writable stream. The stream must be seekable if the download
+ uses more than one parallel connection.
+ :param int max_concurrency:
+ The number of parallel connections with which to download.
+ :returns: The properties of the downloaded blob.
+ :rtype: Any
+ """
+ warnings.warn(
+ "download_to_stream is deprecated, use readinto instead",
+ DeprecationWarning
+ )
+ self._max_concurrency = max_concurrency
+ await self.readinto(stream)
+ return self.properties
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_lease_async.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_lease_async.py
new file mode 100644
index 00000000000..91bf93d0489
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_lease_async.py
@@ -0,0 +1,327 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple,
+ TypeVar, TYPE_CHECKING
+)
+
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from .._shared.response_handlers import return_response_headers, process_storage_error
+from .._generated.models import (
+ StorageErrorException,
+ LeaseAccessConditions)
+from .._serialize import get_modify_conditions
+from .._lease import BlobLeaseClient as LeaseClientBase
+
+if TYPE_CHECKING:
+ from datetime import datetime
+ from .._generated.operations import BlobOperations, ContainerOperations
+ BlobClient = TypeVar("BlobClient")
+ ContainerClient = TypeVar("ContainerClient")
+
+
+class BlobLeaseClient(LeaseClientBase):
+ """Creates a new BlobLeaseClient.
+
+ This client provides lease operations on a BlobClient or ContainerClient.
+
+ :ivar str id:
+ The ID of the lease currently being maintained. This will be `None` if no
+ lease has yet been acquired.
+ :ivar str etag:
+ The ETag of the lease currently being maintained. This will be `None` if no
+ lease has yet been acquired or modified.
+ :ivar ~datetime.datetime last_modified:
+ The last modified timestamp of the lease currently being maintained.
+ This will be `None` if no lease has yet been acquired or modified.
+
+ :param client:
+ The client of the blob or container to lease.
+ :type client: ~azure.storage.blob.aio.BlobClient or
+ ~azure.storage.blob.aio.ContainerClient
+ :param str lease_id:
+ A string representing the lease ID of an existing lease. This value does not
+ need to be specified in order to acquire a new lease, or break one.
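+
+ A minimal usage sketch (assumes the public aio ``BlobClient`` exposes the usual
+ ``acquire_lease`` helper, which returns a :class:`BlobLeaseClient`)::
+
+     async def lease_sample(blob_client):
+         # Acquire a 15-second lease, then release it explicitly.
+         lease = await blob_client.acquire_lease(lease_duration=15)
+         try:
+             await blob_client.set_blob_metadata({"locked": "true"}, lease=lease)
+         finally:
+             await lease.release()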
+ """
+
+ def __enter__(self):
+ raise TypeError("Async lease must use 'async with'.")
+
+ def __exit__(self, *args):
+ self.release()
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, *args):
+ await self.release()
+
+ @distributed_trace_async
+ async def acquire(self, lease_duration=-1, **kwargs):
+ # type: (int, Any) -> None
+ """Requests a new lease.
+
+ If the container does not have an active lease, the Blob service creates a
+ lease on the container and returns a new lease ID.
+
+ :param int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires. A non-infinite lease can be
+ between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change. Default is -1 (infinite lease).
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ response = await self._client.acquire_lease(
+ timeout=kwargs.pop('timeout', None),
+ duration=lease_duration,
+ proposed_lease_id=self.id,
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+ self.etag = response.get('etag') # type: str
+
+ @distributed_trace_async
+ async def renew(self, **kwargs):
+ # type: (Any) -> None
+ """Renews the lease.
+
+ The lease can be renewed if the lease ID specified in the
+ lease client matches that associated with the container or blob. Note that
+ the lease may be renewed even if it has expired as long as the container
+ or blob has not been leased again since the expiration of that lease. When you
+ renew a lease, the lease duration clock resets.
+
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ response = await self._client.renew_lease(
+ lease_id=self.id,
+ timeout=kwargs.pop('timeout', None),
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ self.etag = response.get('etag') # type: str
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+
+ @distributed_trace_async
+ async def release(self, **kwargs):
+ # type: (Any) -> None
+ """Release the lease.
+
+ The lease may be released if the client lease id specified matches
+ that associated with the container or blob. Releasing the lease allows another client
+ to immediately acquire the lease for the container or blob as soon as the release is complete.
+
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ response = await self._client.release_lease(
+ lease_id=self.id,
+ timeout=kwargs.pop('timeout', None),
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ self.etag = response.get('etag') # type: str
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+
+ @distributed_trace_async
+ async def change(self, proposed_lease_id, **kwargs):
+ # type: (str, Any) -> None
+ """Change the lease ID of an active lease.
+
+ :param str proposed_lease_id:
+ Proposed lease ID, in a GUID string format. The Blob service returns 400
+ (Invalid request) if the proposed lease ID is not in the correct format.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ response = await self._client.change_lease(
+ lease_id=self.id,
+ proposed_lease_id=proposed_lease_id,
+ timeout=kwargs.pop('timeout', None),
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ self.etag = response.get('etag') # type: str
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+
+ @distributed_trace_async
+ async def break_lease(self, lease_break_period=None, **kwargs):
+ # type: (Optional[int], Any) -> int
+ """Break the lease, if the container or blob has an active lease.
+
+ Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
+ the request is not required to specify a matching lease ID. When a lease
+ is broken, the lease break period is allowed to elapse, during which time
+ no lease operation except break and release can be performed on the container or blob.
+ When a lease is successfully broken, the response indicates the interval
+ in seconds until a new lease can be acquired.
+
+ :param int lease_break_period:
+            This is the proposed duration, in seconds, that the lease should
+            continue before it is broken, between 0 and 60 seconds. This
+ break period is only used if it is shorter than the time remaining
+ on the lease. If longer, the time remaining on the lease is used.
+ A new lease will not be available before the break period has
+ expired, but the lease may be held for longer than the break
+ period. If this header does not appear with a break
+ operation, a fixed-duration lease breaks after the remaining lease
+ period elapses, and an infinite lease breaks immediately.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. "\"tagname\"='my tag'"
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Approximate time remaining in the lease period, in seconds.
+ :rtype: int
+ """
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ response = await self._client.break_lease(
+ timeout=kwargs.pop('timeout', None),
+ break_period=lease_break_period,
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
+ return response.get('lease_time') # type: ignore
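
The lease operations above mirror the public azure-storage-blob aio lease client. A minimal usage sketch, assuming the upstream `azure-storage-blob` package rather than this vendored copy, with placeholder connection string, container and blob names:

```python
import asyncio

# Assumes the upstream azure-storage-blob aio package; the vendored client above mirrors it.
from azure.storage.blob.aio import BlobClient


async def lease_demo(conn_str: str) -> None:
    blob = BlobClient.from_connection_string(
        conn_str, container_name="mycontainer", blob_name="test.txt")
    async with blob:
        lease = await blob.acquire_lease(lease_duration=60)   # fixed 60-second lease
        await lease.renew()                                    # restart the 60-second clock
        await lease.change("00000000-1111-2222-3333-444444444444")  # swap to a proposed lease ID
        retry_after = await lease.break_lease(lease_break_period=10)
        print(f"lease can be re-acquired in ~{retry_after}s")


asyncio.run(lease_demo("<connection-string>"))
```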
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_list_blobs_helper.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_list_blobs_helper.py
new file mode 100644
index 00000000000..dc098465940
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_list_blobs_helper.py
@@ -0,0 +1,162 @@
+# pylint: disable=too-many-lines
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from azure.core.async_paging import AsyncPageIterator, AsyncItemPaged
+from .._deserialize import get_blob_properties_from_generated_code
+from .._models import BlobProperties
+from .._generated.models import StorageErrorException, BlobItemInternal, BlobPrefix as GenBlobPrefix
+from .._shared.models import DictMixin
+from .._shared.response_handlers import return_context_and_deserialized, process_storage_error
+
+
+class BlobPropertiesPaged(AsyncPageIterator):
+ """An Iterable of Blob properties.
+
+ :ivar str service_endpoint: The service URL.
+ :ivar str prefix: A blob name prefix being used to filter the list.
+ :ivar str marker: The continuation token of the current page of results.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar current_page: The current page of listed results.
+ :vartype current_page: list(~azure.storage.blob.models.BlobProperties)
+ :ivar str container: The container that the blobs are listed from.
+ :ivar str delimiter: A delimiting character used for hierarchy listing.
+
+ :param callable command: Function to retrieve the next page of items.
+ :param str container: The container that the blobs are listed from.
+ :param str prefix: Filters the results to return only blobs whose names
+ begin with the specified prefix.
+ :param int results_per_page: The maximum number of blobs to retrieve per
+ call.
+ :param str continuation_token: An opaque continuation token.
+ :param str delimiter:
+ Used to capture blobs whose names begin with the same substring up to
+ the appearance of the delimiter character. The delimiter may be a single
+ character or a string.
+ :param location_mode: Specifies the location the request should be sent to.
+ This mode only applies for RA-GRS accounts which allow secondary read access.
+ Options include 'primary' or 'secondary'.
+ """
+ def __init__(
+ self, command,
+ container=None,
+ prefix=None,
+ results_per_page=None,
+ continuation_token=None,
+ delimiter=None,
+ location_mode=None):
+ super(BlobPropertiesPaged, self).__init__(
+ get_next=self._get_next_cb,
+ extract_data=self._extract_data_cb,
+ continuation_token=continuation_token or ""
+ )
+ self._command = command
+ self.service_endpoint = None
+ self.prefix = prefix
+ self.marker = None
+ self.results_per_page = results_per_page
+ self.container = container
+ self.delimiter = delimiter
+ self.current_page = None
+ self.location_mode = location_mode
+
+ async def _get_next_cb(self, continuation_token):
+ try:
+ return await self._command(
+ prefix=self.prefix,
+ marker=continuation_token or None,
+ maxresults=self.results_per_page,
+ cls=return_context_and_deserialized,
+ use_location=self.location_mode)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ async def _extract_data_cb(self, get_next_return):
+ self.location_mode, self._response = get_next_return
+ self.service_endpoint = self._response.service_endpoint
+ self.prefix = self._response.prefix
+ self.marker = self._response.marker
+ self.results_per_page = self._response.max_results
+ self.container = self._response.container_name
+ self.current_page = [self._build_item(item) for item in self._response.segment.blob_items]
+
+ return self._response.next_marker or None, self.current_page
+
+ def _build_item(self, item):
+ if isinstance(item, BlobProperties):
+ return item
+ if isinstance(item, BlobItemInternal):
+ blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access
+ blob.container = self.container
+ return blob
+ return item
+
+
+class BlobPrefix(AsyncItemPaged, DictMixin):
+ """An Iterable of Blob properties.
+
+ Returned from walk_blobs when a delimiter is used.
+ Can be thought of as a virtual blob directory.
+
+ :ivar str name: The prefix, or "directory name" of the blob.
+ :ivar str prefix: A blob name prefix being used to filter the list.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str marker: The continuation token of the current page of results.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar current_page: The current page of listed results.
+ :vartype current_page: list(~azure.storage.blob.models.BlobProperties)
+ :ivar str container: The container that the blobs are listed from.
+ :ivar str delimiter: A delimiting character used for hierarchy listing.
+ :param callable command: Function to retrieve the next page of items.
+ :param str prefix: Filters the results to return only blobs whose names
+ begin with the specified prefix.
+ :param int results_per_page: The maximum number of blobs to retrieve per
+ call.
+ :param str marker: An opaque continuation token.
+ :param str delimiter:
+ Used to capture blobs whose names begin with the same substring up to
+ the appearance of the delimiter character. The delimiter may be a single
+ character or a string.
+ :param location_mode: Specifies the location the request should be sent to.
+ This mode only applies for RA-GRS accounts which allow secondary read access.
+ Options include 'primary' or 'secondary'.
+ """
+ def __init__(self, *args, **kwargs):
+ super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs)
+ self.name = kwargs.get('prefix')
+ self.prefix = kwargs.get('prefix')
+ self.results_per_page = kwargs.get('results_per_page')
+ self.container = kwargs.get('container')
+ self.delimiter = kwargs.get('delimiter')
+ self.location_mode = kwargs.get('location_mode')
+
+
+class BlobPrefixPaged(BlobPropertiesPaged):
+ def __init__(self, *args, **kwargs):
+ super(BlobPrefixPaged, self).__init__(*args, **kwargs)
+ self.name = self.prefix
+
+ async def _extract_data_cb(self, get_next_return):
+ continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return)
+ self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items
+ self.current_page = [self._build_item(item) for item in self.current_page]
+ self.delimiter = self._response.delimiter
+
+ return continuation_token, self.current_page
+
+ def _build_item(self, item):
+ item = super(BlobPrefixPaged, self)._build_item(item)
+ if isinstance(item, GenBlobPrefix):
+ return BlobPrefix(
+ self._command,
+ container=self.container,
+ prefix=item.name,
+ results_per_page=self.results_per_page,
+ location_mode=self.location_mode)
+ return item
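
`BlobPropertiesPaged`, `BlobPrefix` and `BlobPrefixPaged` back flat and hierarchical blob listing. A minimal sketch of how a delimiter-based listing is typically consumed through `walk_blobs`, again assuming the upstream aio package and placeholder names:

```python
import asyncio

from azure.storage.blob import BlobProperties
from azure.storage.blob.aio import ContainerClient


async def walk_demo(conn_str: str) -> None:
    container = ContainerClient.from_connection_string(conn_str, container_name="mycontainer")
    async with container:
        # walk_blobs yields BlobProperties for blobs and BlobPrefix ("virtual directories")
        # when a delimiter is supplied; BlobPrefixPaged above builds those pages.
        async for item in container.walk_blobs(name_starts_with="logs/", delimiter="/"):
            if isinstance(item, BlobProperties):
                print("blob  :", item.name)
            else:
                print("prefix:", item.name)  # prefix names end with the delimiter


asyncio.run(walk_demo("<connection-string>"))
```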
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_models.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_models.py
new file mode 100644
index 00000000000..44d5d6314a2
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_models.py
@@ -0,0 +1,141 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+
+from azure.core.async_paging import AsyncPageIterator
+
+from .._models import ContainerProperties, FilteredBlob
+from .._shared.response_handlers import return_context_and_deserialized, process_storage_error
+
+from .._generated.models import StorageErrorException
+from .._generated.models import FilterBlobItem
+
+
+class ContainerPropertiesPaged(AsyncPageIterator):
+ """An Iterable of Container properties.
+
+ :ivar str service_endpoint: The service URL.
+ :ivar str prefix: A container name prefix being used to filter the list.
+ :ivar str marker: The continuation token of the current page of results.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar current_page: The current page of listed results.
+ :vartype current_page: list(~azure.storage.blob.models.ContainerProperties)
+
+ :param callable command: Function to retrieve the next page of items.
+ :param str prefix: Filters the results to return only containers whose names
+ begin with the specified prefix.
+ :param int results_per_page: The maximum number of container names to retrieve per
+ call.
+ :param str continuation_token: An opaque continuation token.
+ """
+ def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
+ super(ContainerPropertiesPaged, self).__init__(
+ get_next=self._get_next_cb,
+ extract_data=self._extract_data_cb,
+ continuation_token=continuation_token or ""
+ )
+ self._command = command
+ self.service_endpoint = None
+ self.prefix = prefix
+ self.marker = None
+ self.results_per_page = results_per_page
+ self.location_mode = None
+ self.current_page = []
+
+ async def _get_next_cb(self, continuation_token):
+ try:
+ return await self._command(
+ marker=continuation_token or None,
+ maxresults=self.results_per_page,
+ cls=return_context_and_deserialized,
+ use_location=self.location_mode)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ async def _extract_data_cb(self, get_next_return):
+ self.location_mode, self._response = get_next_return
+ self.service_endpoint = self._response.service_endpoint
+ self.prefix = self._response.prefix
+ self.marker = self._response.marker
+ self.results_per_page = self._response.max_results
+ self.current_page = [self._build_item(item) for item in self._response.container_items]
+
+ return self._response.next_marker or None, self.current_page
+
+ @staticmethod
+ def _build_item(item):
+ return ContainerProperties._from_generated(item) # pylint: disable=protected-access
+
+
+class FilteredBlobPaged(AsyncPageIterator):
+    """An Iterable of filtered blobs.
+
+ :ivar str service_endpoint: The service URL.
+ :ivar str prefix: A blob name prefix being used to filter the list.
+ :ivar str marker: The continuation token of the current page of results.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str continuation_token: The continuation token to retrieve the next page of results.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar current_page: The current page of listed results.
+ :vartype current_page: list(~azure.storage.blob.BlobProperties)
+ :ivar str container: The container that the blobs are listed from.
+
+ :param callable command: Function to retrieve the next page of items.
+ :param str container: The name of the container.
+ :param int results_per_page: The maximum number of blobs to retrieve per
+ call.
+ :param str continuation_token: An opaque continuation token.
+ :param location_mode: Specifies the location the request should be sent to.
+ This mode only applies for RA-GRS accounts which allow secondary read access.
+ Options include 'primary' or 'secondary'.
+ """
+ def __init__(
+ self, command,
+ container=None,
+ results_per_page=None,
+ continuation_token=None,
+ location_mode=None):
+ super(FilteredBlobPaged, self).__init__(
+ get_next=self._get_next_cb,
+ extract_data=self._extract_data_cb,
+ continuation_token=continuation_token or ""
+ )
+ self._command = command
+ self.service_endpoint = None
+ self.marker = continuation_token
+ self.results_per_page = results_per_page
+ self.container = container
+ self.current_page = None
+ self.location_mode = location_mode
+
+ async def _get_next_cb(self, continuation_token):
+ try:
+ return await self._command(
+ marker=continuation_token or None,
+ maxresults=self.results_per_page,
+ cls=return_context_and_deserialized,
+ use_location=self.location_mode)
+ except StorageErrorException as error:
+ process_storage_error(error)
+
+ async def _extract_data_cb(self, get_next_return):
+ self.location_mode, self._response = get_next_return
+ self.service_endpoint = self._response.service_endpoint
+ self.marker = self._response.next_marker
+ self.current_page = [self._build_item(item) for item in self._response.blobs]
+
+ return self._response.next_marker or None, self.current_page
+
+ @staticmethod
+ def _build_item(item):
+ if isinstance(item, FilterBlobItem):
+ blob = FilteredBlob(name=item.name, container_name=item.container_name, tag_value=item.tag_value) # pylint: disable=protected-access
+ return blob
+ return item
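
`ContainerPropertiesPaged` and `FilteredBlobPaged` back `list_containers` and `find_blobs_by_tags` on the service client. A hedged sketch of driving both, assuming the upstream aio package and a placeholder tag filter:

```python
import asyncio

from azure.storage.blob.aio import BlobServiceClient


async def listing_demo(conn_str: str) -> None:
    service = BlobServiceClient.from_connection_string(conn_str)
    async with service:
        # list_containers pages through ContainerPropertiesPaged under the hood.
        async for container in service.list_containers(name_starts_with="my"):
            print("container:", container.name)

        # find_blobs_by_tags pages through FilteredBlobPaged; each item exposes
        # name, container_name and tag_value as built above.
        async for blob in service.find_blobs_by_tags("\"project\"='storage-preview'"):
            print("tagged blob:", blob.container_name, blob.name)


asyncio.run(listing_demo("<connection-string>"))
```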
diff --git a/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_upload_helpers.py b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_upload_helpers.py
new file mode 100644
index 00000000000..dcefb7d357a
--- /dev/null
+++ b/src/storage-preview/azext_storage_preview/vendored_sdks/blob/aio/_upload_helpers.py
@@ -0,0 +1,271 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+from io import SEEK_SET, UnsupportedOperation
+from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import
+
+import six
+from azure.core.exceptions import ResourceModifiedError
+
+from .._shared.response_handlers import (
+ process_storage_error,
+ return_response_headers)
+from .._shared.uploads_async import (
+ upload_data_chunks,
+ upload_substream_blocks,
+ BlockBlobChunkUploader,
+ PageBlobChunkUploader,
+ AppendBlobChunkUploader)
+from .._shared.encryption import generate_blob_encryption_data, encrypt_blob
+from .._generated.models import (
+ StorageErrorException,
+ BlockLookupList,
+ AppendPositionAccessConditions,
+ ModifiedAccessConditions,
+)
+from .._upload_helpers import _convert_mod_error, _any_conditions
+
+if TYPE_CHECKING:
+ from datetime import datetime # pylint: disable=unused-import
+ BlobLeaseClient = TypeVar("BlobLeaseClient")
+
+
+async def upload_block_blob( # pylint: disable=too-many-locals
+ client=None,
+ data=None,
+ stream=None,
+ length=None,
+ overwrite=None,
+ headers=None,
+ validate_content=None,
+ max_concurrency=None,
+ blob_settings=None,
+ encryption_options=None,
+ **kwargs):
+ try:
+ if not overwrite and not _any_conditions(**kwargs):
+ kwargs['modified_access_conditions'].if_none_match = '*'
+ adjusted_count = length
+ if (encryption_options.get('key') is not None) and (adjusted_count is not None):
+ adjusted_count += (16 - (length % 16))
+ blob_headers = kwargs.pop('blob_headers', None)
+ tier = kwargs.pop('standard_blob_tier', None)
+ blob_tags_string = kwargs.pop('blob_tags_string', None)
+
+ # Do single put if the size is smaller than config.max_single_put_size
+ if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size):
+ try:
+ data = data.read(length)
+ if not isinstance(data, six.binary_type):
+ raise TypeError('Blob data should be of type bytes.')
+ except AttributeError:
+ pass
+ if encryption_options.get('key'):
+ encryption_data, data = encrypt_blob(data, encryption_options['key'])
+ headers['x-ms-meta-encryptiondata'] = encryption_data
+ return await client.upload(
+ data,
+ content_length=adjusted_count,
+ blob_http_headers=blob_headers,
+ headers=headers,
+ cls=return_response_headers,
+ validate_content=validate_content,
+ data_stream_total=adjusted_count,
+ upload_stream_current=0,
+ tier=tier.value if tier else None,
+ blob_tags_string=blob_tags_string,
+ **kwargs)
+
+ use_original_upload_path = blob_settings.use_byte_buffer or \
+ validate_content or encryption_options.get('required') or \
+ blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \
+ hasattr(stream, 'seekable') and not stream.seekable() or \
+ not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
+
+ if use_original_upload_path:
+ if encryption_options.get('key'):
+ cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key'])
+ headers['x-ms-meta-encryptiondata'] = encryption_data
+ encryption_options['cek'] = cek
+ encryption_options['vector'] = iv
+ block_ids = await upload_data_chunks(
+ service=client,
+ uploader_class=BlockBlobChunkUploader,
+ total_size=length,
+ chunk_size=blob_settings.max_block_size,
+ max_concurrency=max_concurrency,
+ stream=stream,
+ validate_content=validate_content,
+ encryption_options=encryption_options,
+ headers=headers,
+ **kwargs
+ )
+ else:
+ block_ids = await upload_substream_blocks(
+ service=client,
+ uploader_class=BlockBlobChunkUploader,
+ total_size=length,
+ chunk_size=blob_settings.max_block_size,
+ max_concurrency=max_concurrency,
+ stream=stream,
+ validate_content=validate_content,
+ headers=headers,
+ **kwargs
+ )
+
+ block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[])
+ block_lookup.latest = block_ids
+ return await client.commit_block_list(
+ block_lookup,
+ blob_http_headers=blob_headers,
+ cls=return_response_headers,
+ validate_content=validate_content,
+ headers=headers,
+ tier=tier.value if tier else None,
+ blob_tags_string=blob_tags_string,
+ **kwargs)
+ except StorageErrorException as error:
+ try:
+ process_storage_error(error)
+ except ResourceModifiedError as mod_error:
+ if not overwrite:
+ _convert_mod_error(mod_error)
+ raise
+
+
+async def upload_page_blob(
+ client=None,
+ stream=None,
+ length=None,
+ overwrite=None,
+ headers=None,
+ validate_content=None,
+ max_concurrency=None,
+ blob_settings=None,
+ encryption_options=None,
+ **kwargs):
+ try:
+ if not overwrite and not _any_conditions(**kwargs):
+ kwargs['modified_access_conditions'].if_none_match = '*'
+ if length is None or length < 0:
+ raise ValueError("A content length must be specified for a Page Blob.")
+ if length % 512 != 0:
+ raise ValueError("Invalid page blob size: {0}. "
+ "The size must be aligned to a 512-byte boundary.".format(length))
+ if kwargs.get('premium_page_blob_tier'):
+ premium_page_blob_tier = kwargs.pop('premium_page_blob_tier')
+ try:
+ headers['x-ms-access-tier'] = premium_page_blob_tier.value
+ except AttributeError:
+ headers['x-ms-access-tier'] = premium_page_blob_tier
+ if encryption_options and encryption_options.get('data'):
+ headers['x-ms-meta-encryptiondata'] = encryption_options['data']
+ blob_tags_string = kwargs.pop('blob_tags_string', None)
+
+ response = await client.create(
+ content_length=0,
+ blob_content_length=length,
+ blob_sequence_number=None,
+ blob_http_headers=kwargs.pop('blob_headers', None),
+ blob_tags_string=blob_tags_string,
+ cls=return_response_headers,
+ headers=headers,
+ **kwargs)
+ if length == 0:
+ return response
+
+ kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag'])
+ return await upload_data_chunks(
+ service=client,
+ uploader_class=PageBlobChunkUploader,
+ total_size=length,
+ chunk_size=blob_settings.max_page_size,
+ stream=stream,
+ max_concurrency=max_concurrency,
+ validate_content=validate_content,
+ encryption_options=encryption_options,
+ headers=headers,
+ **kwargs)
+
+ except StorageErrorException as error:
+ try:
+ process_storage_error(error)
+ except ResourceModifiedError as mod_error:
+ if not overwrite:
+ _convert_mod_error(mod_error)
+ raise
+
+
+async def upload_append_blob( # pylint: disable=unused-argument
+ client=None,
+ stream=None,
+ length=None,
+ overwrite=None,
+ headers=None,
+ validate_content=None,
+ max_concurrency=None,
+ blob_settings=None,
+ encryption_options=None,
+ **kwargs):
+ try:
+ if length == 0:
+ return {}
+ blob_headers = kwargs.pop('blob_headers', None)
+ append_conditions = AppendPositionAccessConditions(
+ max_size=kwargs.pop('maxsize_condition', None),
+ append_position=None)
+ blob_tags_string = kwargs.pop('blob_tags_string', None)
+
+ try:
+ if overwrite:
+ await client.create(
+ content_length=0,
+ blob_http_headers=blob_headers,
+ headers=headers,
+ blob_tags_string=blob_tags_string,
+ **kwargs)
+ return await upload_data_chunks(
+ service=client,
+ uploader_class=AppendBlobChunkUploader,
+ total_size=length,
+ chunk_size=blob_settings.max_block_size,
+ stream=stream,
+ max_concurrency=max_concurrency,
+ validate_content=validate_content,
+ append_position_access_conditions=append_conditions,
+ headers=headers,
+ **kwargs)
+ except StorageErrorException as error:
+ if error.response.status_code != 404:
+ raise
+ # rewind the request body if it is a stream
+ if hasattr(stream, 'read'):
+ try:
+ # attempt to rewind the body to the initial position
+ stream.seek(0, SEEK_SET)
+ except UnsupportedOperation:
+ # if body is not seekable, then retry would not work
+ raise error
+ await client.create(
+ content_length=0,
+ blob_http_headers=blob_headers,
+ headers=headers,
+ blob_tags_string=blob_tags_string,
+ **kwargs)
+ return await upload_data_chunks(
+ service=client,
+ uploader_class=AppendBlobChunkUploader,
+ total_size=length,
+ chunk_size=blob_settings.max_block_size,
+ stream=stream,
+ max_concurrency=max_concurrency,
+ validate_content=validate_content,
+ append_position_access_conditions=append_conditions,
+ headers=headers,
+ **kwargs)
+ except StorageErrorException as error:
+ process_storage_error(error)
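
These helpers are not called directly; `upload_blob` on the blob client dispatches to `upload_block_blob`, `upload_page_blob` or `upload_append_blob` based on the blob type. A minimal sketch, assuming the upstream aio package and a placeholder local file:

```python
import asyncio

from azure.storage.blob.aio import BlobClient


async def upload_demo(conn_str: str) -> None:
    blob = BlobClient.from_connection_string(
        conn_str, container_name="mycontainer", blob_name="big.bin")
    async with blob:
        with open("big.bin", "rb") as data:
            # The default blob_type (BlockBlob) routes to upload_block_blob above:
            # small payloads take the single-put path, larger streams are chunked
            # and committed via commit_block_list.
            await blob.upload_blob(data, overwrite=True, max_concurrency=2)


asyncio.run(upload_demo("<connection-string>"))
```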
diff --git a/src/storage-preview/setup.py b/src/storage-preview/setup.py
index 2da68a6fac7..264d07437d8 100644
--- a/src/storage-preview/setup.py
+++ b/src/storage-preview/setup.py
@@ -8,7 +8,7 @@
from codecs import open
from setuptools import setup, find_packages
-VERSION = "0.7.2"
+VERSION = "0.7.3"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
@@ -24,7 +24,7 @@
'License :: OSI Approved :: MIT License',
]
-DEPENDENCIES = []
+DEPENDENCIES = ['azure-core']
setup(
name='storage-preview',